mod <- t.test(pF_hat ~ group, var.equal=TRUE, data=df)
eff <- mod$estimate[2] - mod$estimate[1]
pval <- mod$p.value
}))
# NOTE(review): everything below is an interactive benchmarking transcript
# comparing equivalent ways of fitting/extracting the group effect
# (t.test vs lm; with(df, ...) vs data=df; $-extraction vs with(mod, ...)).
# The near-identical system.time(replicate(...)) chunks are deliberate
# repeated timing runs of the exact forms shown — do not deduplicate or
# restyle; their literal shape is what is being timed. `df` is assumed to
# be a simulated dataset with columns pF_hat and group — TODO confirm.
# mod <- lm(pF_hat ~ group, data=df)
# eff <- coef(mod)['groupB']
# pval <- anova(mod) %>% extract2('Pr(>F)') %>% extract(1)
# timing: t.test called with data=df
system.time(replicate(1000, {
# t.test is faster than lm
mod <- t.test(pF_hat ~ group, var.equal=TRUE, data=df)
eff <- mod$estimate[2] - mod$estimate[1]
pval <- mod$p.value
}))
# mod <- lm(pF_hat ~ group, data=df)
# eff <- coef(mod)['groupB']
# pval <- anova(mod) %>% extract2('Pr(>F)') %>% extract(1)
# timing: same test via with(df, ...) instead of data=
system.time(replicate(1000, {
# t.test is faster than lm
mod <- with(df, t.test(pF_hat ~ group, var.equal=TRUE))
eff <- mod$estimate[2] - mod$estimate[1]
pval <- mod$p.value
}))
# mod <- lm(pF_hat ~ group, data=df)
# eff <- coef(mod)['groupB']
# pval <- anova(mod) %>% extract2('Pr(>F)') %>% extract(1)
system.time(replicate(1000, {
# t.test is faster than lm
mod <- with(df, t.test(pF_hat ~ group, var.equal=TRUE))
eff <- mod$estimate[2] - mod$estimate[1]
pval <- mod$p.value
}))
# mod <- lm(pF_hat ~ group, data=df)
# eff <- coef(mod)['groupB']
# pval <- anova(mod) %>% extract2('Pr(>F)') %>% extract(1)
system.time(replicate(1000, {
# t.test is faster than lm
mod <- with(df, t.test(pF_hat ~ group, var.equal=TRUE))
eff <- mod$estimate[2] - mod$estimate[1]
pval <- mod$p.value
}))
# mod <- lm(pF_hat ~ group, data=df)
# eff <- coef(mod)['groupB']
# pval <- anova(mod) %>% extract2('Pr(>F)') %>% extract(1)
# timing: extracting the effect via with(mod, ...) instead of $
system.time(replicate(1000, {
# t.test is faster than lm
mod <- with(df, t.test(pF_hat ~ group, var.equal=TRUE))
eff <- with(mod, estimate[2] - estimate[1])
pval <- mod$p.value
}))
# mod <- lm(pF_hat ~ group, data=df)
# eff <- coef(mod)['groupB']
# pval <- anova(mod) %>% extract2('Pr(>F)') %>% extract(1)
system.time(replicate(1000, {
# t.test is faster than lm
mod <- with(df, t.test(pF_hat ~ group, var.equal=TRUE))
eff <- with(mod, estimate[2] - estimate[1])
pval <- mod$p.value
}))
# mod <- lm(pF_hat ~ group, data=df)
# eff <- coef(mod)['groupB']
# pval <- anova(mod) %>% extract2('Pr(>F)') %>% extract(1)
system.time(replicate(1000, {
# t.test is faster than lm
mod <- with(df, t.test(pF_hat ~ group, var.equal=TRUE))
eff <- with(mod, estimate[2] - estimate[1])
pval <- mod$p.value
}))
# mod <- lm(pF_hat ~ group, data=df)
# eff <- coef(mod)['groupB']
# pval <- anova(mod) %>% extract2('Pr(>F)') %>% extract(1)
# timing: bumped to 3000 reps for a more stable estimate
system.time(replicate(3000, {
# t.test is faster than lm
mod <- with(df, t.test(pF_hat ~ group, var.equal=TRUE))
eff <- with(mod, estimate[2] - estimate[1])
pval <- mod$p.value
}))
# mod <- lm(pF_hat ~ group, data=df)
# eff <- coef(mod)['groupB']
# pval <- anova(mod) %>% extract2('Pr(>F)') %>% extract(1)
# timing: back to $-extraction (parenthesized)
system.time(replicate(3000, {
# t.test is faster than lm
mod <- with(df, t.test(pF_hat ~ group, var.equal=TRUE))
eff <- (mod$estimate[2] - mod$estimate[1])
pval <- mod$p.value
}))
# mod <- lm(pF_hat ~ group, data=df)
# eff <- coef(mod)['groupB']
# pval <- anova(mod) %>% extract2('Pr(>F)') %>% extract(1)
system.time(replicate(3000, {
# t.test is faster than lm
mod <- with(df, t.test(pF_hat ~ group, var.equal=TRUE))
eff <- (mod$estimate[2] - mod$estimate[1])
pval <- mod$p.value
}))
# mod <- lm(pF_hat ~ group, data=df)
# eff <- coef(mod)['groupB']
# pval <- anova(mod) %>% extract2('Pr(>F)') %>% extract(1)
# timing: explicit df$ column references in the formula
system.time(replicate(3000, {
# t.test is faster than lm
# mod <- with(df, t.test(pF_hat ~ group, var.equal=TRUE))
mod <- t.test(df$pF_hat ~ df$group, var.equal=TRUE)
eff <- (mod$estimate[2] - mod$estimate[1])
pval <- mod$p.value
}))
# NOTE(review): continuation of the timing transcript — alternating between
# the with(df, ...) and data=df forms of the same t.test, with increasing
# replication counts (3000 → 5000 → 7000 → 8000 → 10000). The repetition
# is intentional interactive benchmarking; keep the literal forms.
# mod <- lm(pF_hat ~ group, data=df)
# eff <- coef(mod)['groupB']
# pval <- anova(mod) %>% extract2('Pr(>F)') %>% extract(1)
system.time(replicate(3000, {
# t.test is faster than lm
mod <- with(df, t.test(pF_hat ~ group, var.equal=TRUE))
eff <- (mod$estimate[2] - mod$estimate[1])
pval <- mod$p.value
}))
# mod <- lm(pF_hat ~ group, data=df)
# eff <- coef(mod)['groupB']
# pval <- anova(mod) %>% extract2('Pr(>F)') %>% extract(1)
system.time(replicate(3000, {
# t.test is faster than lm
# mod <- with(df, t.test(pF_hat ~ group, var.equal=TRUE))
mod <- t.test(pF_hat ~ group, var.equal=TRUE, data=df)
eff <- (mod$estimate[2] - mod$estimate[1])
pval <- mod$p.value
}))
# mod <- lm(pF_hat ~ group, data=df)
# eff <- coef(mod)['groupB']
# pval <- anova(mod) %>% extract2('Pr(>F)') %>% extract(1)
system.time(replicate(3000, {
# t.test is faster than lm
# mod <- with(df, t.test(pF_hat ~ group, var.equal=TRUE))
mod <- t.test(pF_hat ~ group, var.equal=TRUE, data=df)
eff <- (mod$estimate[2] - mod$estimate[1])
pval <- mod$p.value
}))
# mod <- lm(pF_hat ~ group, data=df)
# eff <- coef(mod)['groupB']
# pval <- anova(mod) %>% extract2('Pr(>F)') %>% extract(1)
system.time(replicate(3000, {
# t.test is faster than lm
mod <- with(df, t.test(pF_hat ~ group, var.equal=TRUE))
# mod <- t.test(pF_hat ~ group, var.equal=TRUE, data=df)
eff <- (mod$estimate[2] - mod$estimate[1])
pval <- mod$p.value
}))
# mod <- lm(pF_hat ~ group, data=df)
# eff <- coef(mod)['groupB']
# pval <- anova(mod) %>% extract2('Pr(>F)') %>% extract(1)
system.time(replicate(3000, {
# t.test is faster than lm
mod <- with(df, t.test(pF_hat ~ group, var.equal=TRUE))
# mod <- t.test(pF_hat ~ group, var.equal=TRUE, data=df)
eff <- (mod$estimate[2] - mod$estimate[1])
pval <- mod$p.value
}))
# mod <- lm(pF_hat ~ group, data=df)
# eff <- coef(mod)['groupB']
# pval <- anova(mod) %>% extract2('Pr(>F)') %>% extract(1)
system.time(replicate(5000, {
# t.test is faster than lm
mod <- with(df, t.test(pF_hat ~ group, var.equal=TRUE))
# mod <- t.test(pF_hat ~ group, var.equal=TRUE, data=df)
eff <- (mod$estimate[2] - mod$estimate[1])
pval <- mod$p.value
}))
# mod <- lm(pF_hat ~ group, data=df)
# eff <- coef(mod)['groupB']
# pval <- anova(mod) %>% extract2('Pr(>F)') %>% extract(1)
system.time(replicate(5000, {
# t.test is faster than lm
# mod <- with(df, t.test(pF_hat ~ group, var.equal=TRUE))
mod <- t.test(pF_hat ~ group, var.equal=TRUE, data=df)
eff <- (mod$estimate[2] - mod$estimate[1])
pval <- mod$p.value
}))
# mod <- lm(pF_hat ~ group, data=df)
# eff <- coef(mod)['groupB']
# pval <- anova(mod) %>% extract2('Pr(>F)') %>% extract(1)
system.time(replicate(5000, {
# t.test is faster than lm
# mod <- with(df, t.test(pF_hat ~ group, var.equal=TRUE))
mod <- t.test(pF_hat ~ group, var.equal=TRUE, data=df)
eff <- (mod$estimate[2] - mod$estimate[1])
pval <- mod$p.value
}))
# mod <- lm(pF_hat ~ group, data=df)
# eff <- coef(mod)['groupB']
# pval <- anova(mod) %>% extract2('Pr(>F)') %>% extract(1)
system.time(replicate(7000, {
# t.test is faster than lm
# mod <- with(df, t.test(pF_hat ~ group, var.equal=TRUE))
mod <- t.test(pF_hat ~ group, var.equal=TRUE, data=df)
eff <- (mod$estimate[2] - mod$estimate[1])
pval <- mod$p.value
}))
# mod <- lm(pF_hat ~ group, data=df)
# eff <- coef(mod)['groupB']
# pval <- anova(mod) %>% extract2('Pr(>F)') %>% extract(1)
system.time(replicate(8000, {
# t.test is faster than lm
# mod <- with(df, t.test(pF_hat ~ group, var.equal=TRUE))
mod <- t.test(pF_hat ~ group, var.equal=TRUE, data=df)
eff <- (mod$estimate[2] - mod$estimate[1])
pval <- mod$p.value
}))
# mod <- lm(pF_hat ~ group, data=df)
# eff <- coef(mod)['groupB']
# pval <- anova(mod) %>% extract2('Pr(>F)') %>% extract(1)
system.time(replicate(8000, {
# t.test is faster than lm
mod <- with(df, t.test(pF_hat ~ group, var.equal=TRUE))
# mod <- t.test(pF_hat ~ group, var.equal=TRUE, data=df)
eff <- (mod$estimate[2] - mod$estimate[1])
pval <- mod$p.value
}))
# mod <- lm(pF_hat ~ group, data=df)
# eff <- coef(mod)['groupB']
# pval <- anova(mod) %>% extract2('Pr(>F)') %>% extract(1)
system.time(replicate(8000, {
# t.test is faster than lm
mod <- with(df, t.test(pF_hat ~ group, var.equal=TRUE))
# mod <- t.test(pF_hat ~ group, var.equal=TRUE, data=df)
eff <- (mod$estimate[2] - mod$estimate[1])
pval <- mod$p.value
}))
# mod <- lm(pF_hat ~ group, data=df)
# eff <- coef(mod)['groupB']
# pval <- anova(mod) %>% extract2('Pr(>F)') %>% extract(1)
system.time(replicate(10000, {
# t.test is faster than lm
mod <- with(df, t.test(pF_hat ~ group, var.equal=TRUE))
# mod <- t.test(pF_hat ~ group, var.equal=TRUE, data=df)
eff <- (mod$estimate[2] - mod$estimate[1])
pval <- mod$p.value
}))
# NOTE(review): stray console re-run of the extraction line; uses whatever
# `mod` was left over from the last replicate chunk
eff <- (mod$estimate[2] - mod$estimate[1])
# mod <- lm(pF_hat ~ group, data=df)
# eff <- coef(mod)['groupB']
# pval <- anova(mod) %>% extract2('Pr(>F)') %>% extract(1)
system.time(replicate(10000, {
# t.test is faster than lm
# mod <- with(df, t.test(pF_hat ~ group, var.equal=TRUE))
mod <- t.test(pF_hat ~ group, var.equal=TRUE, data=df)
eff <- (mod$estimate[2] - mod$estimate[1])
pval <- mod$p.value
}))
# Analyze one simulated unpaired dataset: equal-variance two-sample t-test
# of pF_hat between groups A and B.
#
# Args:
#   df: data frame with columns pF_hat, group ('A'/'B'), sid, stimulus.
# Returns:
#   Named numeric vector c(pval = ..., eff = ...), where eff is the
#   B-minus-A difference in group means.
analyze.unpaired <- function(df) {
  # check for constancy (fix.constancy defined in power_analysis_functions.R)
  df$pF_hat[df$group == 'A'] %<>% fix.constancy
  df$pF_hat[df$group == 'B'] %<>% fix.constancy
  if (length(unique(df$stimulus)) > 1) {
    # for speed reasons, just collapse across stimuli for now, lmer doesn't
    # change things b/c we're generating data with fixed effects (subject to truncation)
    df <- df %>% do_aggregate(pF_hat ~ group + sid, mean)
  }
  # t.test is faster than lm
  # with() is slightly faster than data=df
  mod <- with(df, t.test(pF_hat ~ group, var.equal=TRUE))
  # FIX: t.test()'s estimate vector carries names ("mean in group B"), so
  # without unname() the returned element would be named
  # 'eff.mean in group B' instead of 'eff'
  eff <- unname(mod$estimate[2] - mod$estimate[1])
  pval <- mod$p.value
  c('pval' = pval, 'eff' = eff)
}
# quick row count of the McGurk-only dataset (result printed interactively)
read.csv(file='~/Dropbox/MultisensoryIntegration/Papers/InferenceInMcGurk/data/us_mcg_only.csv',
row.names=NULL) %>% nrow
#
# McGurk power analysis
#
setwd('~/Dropbox/MultisensoryIntegration/Papers/McGurk_MTurk_Compare/code/power_analysis_output/')
source('../power_analysis_functions.R')
source('power_analysis_graphs.R')
# setup initial simulation parameters
simulation_parameters <- list(
N=NA,
n_stimuli=1,
n_trials=10,
group_type='unpaired',
delta=function(pF) pF + 0.1725,
limit=function(pF) clip(pF, limits = c(0.05, 0.95))
)
Ns <- c(20, 30, 40, 50, 100, 150, 200, 300)
deltas <-  c(.0945, .1725, .33, .5)
delta_to_pop_round <- c(5, 10, 20, 30)/100
pop_df <- get_fusion_df()
# get population level effect
set.seed(122)
## Figure 2
# get the long-run behavior of N=30 vs. N=300
simulation_parameters$n_stimuli <- 1
simulation_parameters$N <- 300
# accumulate 10 x 5 = 50 reps in chunks (rbind in a loop is fine at this scale)
n300 <- do_sim(simulation_parameters, chunk_size = 1000, n.rep = 5)
for(ii in 2:10) n300 <- rbind(n300, do_sim(simulation_parameters, chunk_size = 1000, n.rep = 5))
#attr(n300, 'pars') <- simulation_parameters
# NOTE(review): this first call runs before the 'pars' attribute is attached
# below — presumably it failed / was re-run; the attr is required by
# analyze_sim_results via par_vec
analyze_sim_results(n300)
attr(n300, 'pars') <- simulation_parameters
#attr(n300, 'pars') <- simulation_parameters
analyze_sim_results(n300)
plot.power_hist(n300, .1)
saveRDS(n300, 'n300.RDS')
plot.power_hist(n300, .1)
simulation_parameters$N <- 30
n30 <- do_sim(simulation_parameters, chunk_size = 1000, n.rep = 50)
analyze_sim_results(n30)
analyze_sim_results
analyze_sim_results(n30)
# re-run the N=30 simulation from the same seed for reproducibility
set.seed(122)
simulation_parameters$N <- 30
n30 <- do_sim(simulation_parameters, chunk_size = 1000, n.rep = 50)
# Summary statistics from the result of do_sim.
#
# Args:
#   res: result matrix from do_sim; column 'pval' holds p-values and
#        column 2 holds the effect estimates (its name varies across runs).
#        Must carry a 'pars' attribute (the simulation_parameters list),
#        consumed by par_vec().
#   pop_delta: true population effect against which estimates are judged.
#   alpha: significance threshold (default 0.05; new optional argument,
#          backward compatible).
# Returns:
#   A 1-row named matrix: rep count, simulation parameters, power, and
#   summaries of significant / non-significant effect estimates.
analyze_sim_results <- function(res, pop_delta=.1, alpha=0.05) {
  sim_pars <- attr(res, 'pars')
  pvals <- res[,'pval']
  est <- res[,2]
  stopifnot(!identical(pvals, est))
  # default all summaries to NA so all-significant or none-significant
  # runs still yield a complete row
  mean_non_sig <- abs_eff_est <- mean_est <- eff_too_big <-
    non_sig_too_small <- eff_wrong_sign <- eff_inflation <- NA
  sig <- which(pvals < alpha)   # which() drops NA p-values
  if (length(sig) > 0) {
    eff_estimates <- est[sig]
    # mean absolute inflation of significant estimates relative to truth
    eff_inflation <- mean(abs(eff_estimates/pop_delta))
    eff_wrong_sign <- mean(sign(pop_delta) != sign(eff_estimates))
    eff_too_big <- mean(eff_estimates > pop_delta)
    mean_est <- mean(eff_estimates)
    abs_eff_est <- mean(abs(eff_estimates))
  }
  non_sig <- which(pvals >= alpha)
  if (length(non_sig) > 0) {
    ns_eff <- est[non_sig]
    mean_non_sig <- mean(ns_eff)
    non_sig_too_small <- mean(ns_eff < pop_delta)
  }
  pow <- mean(pvals < alpha, na.rm=TRUE)
  n.rep <- nrow(res)
  out <- matrix(c(n.rep, par_vec(sim_pars), pop_delta, pow, mean_est, abs_eff_est,
                  eff_inflation, eff_wrong_sign, eff_too_big, mean_non_sig,
                  non_sig_too_small),
                nrow=1)
  colnames(out) <- c('n.rep', 'N', 'n_stim', 'n_trials', 'is_paired', 'delta', 'pop_eff',
                     'power', 'mean Eff Est', 'abs Eff Est', 'mean_abs_ratio',
                     'p(wrong_sign)', 'p(sig > pop_eff)', 'mean non sig est',
                     'p(non-sig < pop_eff)')
  out
}
# re-check N=30 summaries with the (re)defined analyze_sim_results
analyze_sim_results(n30)
# Histogram of simulated effect estimates: all estimates in col.ns with the
# significant (p < .05) subset overlaid in col.sig; pop_delta marked by a
# dashed vertical line.
#
# Args:
#   result: simulation result matrix ('pval' column + effect column 2).
#   pop_delta: true population effect (x position of the dashed line).
#   ymax: y-axis (count) limit.
#   breaks: histogram break points.
#   col.ns, col.sig: colors for all vs significant estimates.
#   xlim: x range of the plot; bars outside it are clipped.
plot.power_hist <- function(result, pop_delta, ymax=3000, breaks=seq(-1, 1, by=0.005), col.ns = 'gray50', col.sig='dodgerblue3', xlim=c(-.3, .55)) {
  ps <- result[,'pval']
  effs <- result[,2]
  # Draw one smoothed histogram as vertical bars; counts at or below
  # min_count are blanked to suppress speckle.
  # FIX: min_count was computed (round(.001*ymax)) but never used — the
  # threshold was a hard-coded 5 — and lwd defaulted to 4 while 1 was
  # hard-coded in points(). Defaults now match the old effective behavior
  # and the parameters are actually honored.
  draw.hist <- function(x, col, lwd = 1, add=FALSE, min_count = 5) {
    res <- hist(x, plot=FALSE, breaks=breaks)
    if(!add) plot.clean(xlim, c(0,ymax))
    rc <- smooth(res$counts, twiceit = FALSE)
    rc[rc <= min_count] <- NA
    # FIX: clip against xlim instead of repeating its default values
    keep.ind <- res$mids >= xlim[1] & res$mids <= xlim[2]
    points(res$mids[keep.ind], rc[keep.ind], type='h', lwd=lwd, lend=2, col=col)
    return(max(res$counts))
  }
  draw.hist(effs, getAlphaRGB(col.ns, alpha=150))
  est_eff <- effs[ps<0.05]
  draw.hist(est_eff, col=getAlphaRGB(col.sig, 150), add=TRUE)
  abline(v=pop_delta, lwd=1.5, lty=2, col='gray30')
  draw.axis(1, at=-1:2 / 4)
  draw.axis(2, at=c(0, ymax), tcl=0)
}
# inspect parameters and summaries interactively
simulation_parameters
analyze_sim_results(n300)
plot.power_hist(n300, .1)
analyze_sim_results(n30)
plot.power_hist(n30, .1)
# write out the figures
# NOTE(review): ph.pdf is used here but only defined further down in this
# history — these calls presumably came from a session where it already
# existed
ph.pdf(n300, .1, fname='n300_power_hist.pdf')
ph.pdf(n30, .1, fname='n30_power_hist.pdf')
# PDF version of the above plot
pdf.pgb <- to_pdf(plot.grp_bplot, w=1.25, h = 1.75, mar=rep(0.5, 4))
# Redefinition of plot.power_hist with ymax lowered to 2500 (interactive
# axis tuning; otherwise identical to the version above).
# NOTE(review): min_count is computed but unused (threshold hard-coded as 5)
# and lwd defaults to 4 while 1 is hard-coded in points() — kept as-is here
# to preserve the session history.
plot.power_hist <- function(result, pop_delta, ymax=2500, breaks=seq(-1, 1, by=0.005), col.ns = 'gray50', col.sig='dodgerblue3', xlim=c(-.3, .55)) {
ps <- result[,'pval']
effs <- result[,2]
draw.hist <- function(x, col, lwd = 4, add=FALSE, min_count = round(.001 * ymax)) {
res <- hist(x, plot=FALSE, breaks=breaks)
if(!add) plot.clean(xlim, c(0,ymax))
rc <- smooth(res$counts, twiceit = FALSE)
rc[rc <= 5] = NA
keep.ind <- res$mids >= -.3 & res$mids <= .55
points(res$mids[keep.ind], rc[keep.ind], type='h', lwd=1, lend=2, col=col)
return(max(res$counts))
}
draw.hist(effs, getAlphaRGB(col.ns, alpha=150))
est_eff <- effs[ps<0.05]
draw.hist(est_eff, col=getAlphaRGB(col.sig, 150), add=TRUE)
abline(v=pop_delta, lwd=1.5, lty=2, col='gray30')
draw.axis(1, at=-1:2 / 4)
draw.axis(2, at=c(0, ymax), tcl=0)
}
# Duplicate redefinition of plot.power_hist (byte-identical to the previous
# one, ymax=2500) — repeated console entry preserved from the history.
plot.power_hist <- function(result, pop_delta, ymax=2500, breaks=seq(-1, 1, by=0.005), col.ns = 'gray50', col.sig='dodgerblue3', xlim=c(-.3, .55)) {
ps <- result[,'pval']
effs <- result[,2]
draw.hist <- function(x, col, lwd = 4, add=FALSE, min_count = round(.001 * ymax)) {
res <- hist(x, plot=FALSE, breaks=breaks)
if(!add) plot.clean(xlim, c(0,ymax))
rc <- smooth(res$counts, twiceit = FALSE)
rc[rc <= 5] = NA
keep.ind <- res$mids >= -.3 & res$mids <= .55
points(res$mids[keep.ind], rc[keep.ind], type='h', lwd=1, lend=2, col=col)
return(max(res$counts))
}
draw.hist(effs, getAlphaRGB(col.ns, alpha=150))
est_eff <- effs[ps<0.05]
draw.hist(est_eff, col=getAlphaRGB(col.sig, 150), add=TRUE)
abline(v=pop_delta, lwd=1.5, lty=2, col='gray30')
draw.axis(1, at=-1:2 / 4)
draw.axis(2, at=c(0, ymax), tcl=0)
}
#pdf version of the above plot
# wrap plot.power_hist so it renders to a 2.15 x 1.75 inch PDF
ph.pdf <- to_pdf(plot.power_hist, 2.15, 1.75, mar=rep(0.5, 4))
plot.power_hist(n30, .1)
# write out the figures
ph.pdf(n300, .1, fname='n300_power_hist.pdf')
ph.pdf(n30, .1, fname='n30_power_hist.pdf')
# Redefinition of plot.power_hist with ymax lowered to 2250 (further
# interactive axis tuning; logic unchanged).
plot.power_hist <- function(result, pop_delta, ymax=2250, breaks=seq(-1, 1, by=0.005), col.ns = 'gray50', col.sig='dodgerblue3', xlim=c(-.3, .55)) {
ps <- result[,'pval']
effs <- result[,2]
draw.hist <- function(x, col, lwd = 4, add=FALSE, min_count = round(.001 * ymax)) {
res <- hist(x, plot=FALSE, breaks=breaks)
if(!add) plot.clean(xlim, c(0,ymax))
rc <- smooth(res$counts, twiceit = FALSE)
rc[rc <= 5] = NA
keep.ind <- res$mids >= -.3 & res$mids <= .55
points(res$mids[keep.ind], rc[keep.ind], type='h', lwd=1, lend=2, col=col)
return(max(res$counts))
}
draw.hist(effs, getAlphaRGB(col.ns, alpha=150))
est_eff <- effs[ps<0.05]
draw.hist(est_eff, col=getAlphaRGB(col.sig, 150), add=TRUE)
abline(v=pop_delta, lwd=1.5, lty=2, col='gray30')
draw.axis(1, at=-1:2 / 4)
draw.axis(2, at=c(0, ymax), tcl=0)
}
#pdf version of the above plot
# re-wrap after the redefinition so the PDF output picks up the new ymax
ph.pdf <- to_pdf(plot.power_hist, 2.15, 1.75, mar=rep(0.5, 4))
# write out the figures
ph.pdf(n300, .1, fname='n300_power_hist.pdf')
ph.pdf(n30, .1, fname='n30_power_hist.pdf')
# Final version of plot.power_hist (ymax settled at 2000): histogram of all
# simulated effect estimates (col.ns) with significant (p < .05) estimates
# overlaid (col.sig) and pop_delta marked by a dashed vertical line.
#
# Args:
#   result: simulation result matrix ('pval' column + effect column 2).
#   pop_delta: true population effect (x position of the dashed line).
#   ymax: y-axis (count) limit.
#   breaks: histogram break points.
#   col.ns, col.sig: colors for all vs significant estimates.
#   xlim: x range of the plot; bars outside it are clipped.
plot.power_hist <- function(result, pop_delta, ymax=2000, breaks=seq(-1, 1, by=0.005), col.ns = 'gray50', col.sig='dodgerblue3', xlim=c(-.3, .55)) {
  ps <- result[,'pval']
  effs <- result[,2]
  # Draw one smoothed histogram as vertical bars; counts at or below
  # min_count are blanked to suppress speckle.
  # FIX: min_count was computed (round(.001*ymax)) but never used — the
  # threshold was a hard-coded 5 — and lwd defaulted to 4 while 1 was
  # hard-coded in points(). Defaults now match the old effective behavior
  # and the parameters are actually honored.
  draw.hist <- function(x, col, lwd = 1, add=FALSE, min_count = 5) {
    res <- hist(x, plot=FALSE, breaks=breaks)
    if(!add) plot.clean(xlim, c(0,ymax))
    rc <- smooth(res$counts, twiceit = FALSE)
    rc[rc <= min_count] <- NA
    # FIX: clip against xlim instead of repeating its default values
    keep.ind <- res$mids >= xlim[1] & res$mids <= xlim[2]
    points(res$mids[keep.ind], rc[keep.ind], type='h', lwd=lwd, lend=2, col=col)
    return(max(res$counts))
  }
  draw.hist(effs, getAlphaRGB(col.ns, alpha=150))
  est_eff <- effs[ps<0.05]
  draw.hist(est_eff, col=getAlphaRGB(col.sig, 150), add=TRUE)
  abline(v=pop_delta, lwd=1.5, lty=2, col='gray30')
  draw.axis(1, at=-1:2 / 4)
  draw.axis(2, at=c(0, ymax), tcl=0)
}
#pdf version of the above plot
ph.pdf <- to_pdf(plot.power_hist, 2.15, 1.75, mar=rep(0.5, 4))
# write out the figures
ph.pdf(n300, .1, fname='n300_power_hist.pdf')
ph.pdf(n30, .1, fname='n30_power_hist.pdf')
analyze_sim_results(n300)
analyze_sim_results(n30)
# get n=30 effect
set.seed(122)
simulation_parameters$N <- 30
n30_eff <- run_one_sim(simulation_parameters, pop_df, summary=TRUE)
pdf.pgb(n30_eff, fname='n30_ex_plot.pdf')
pdf.pgb(n30_eff, fname='n30_ex_plot.pdf')
n30_eff
n30_eff %>% do_aggregate(pF_hat ~ group)
n30_eff %>% do_aggregate(pF_hat ~ group, m_sd)
# get n=300 effect
set.seed(122)
simulation_parameters$N <- 300
# NOTE(review): a string is assigned into n_stimuli here (elsewhere it holds
# a count) — presumably selects a specific stimulus; verify against
# run_one_sim's handling of this field
simulation_parameters$n_stimuli = 'McGr2'
# get n=30 effect
set.seed(122)
simulation_parameters$N <- 30
n30_eff <- run_one_sim(simulation_parameters, pop_df, summary=TRUE)
pdf.pgb(n30_eff, fname='n30_ex_plot.pdf')
# get n=300 effect
set.seed(122)
simulation_parameters$N <- 300
simulation_parameters$n_stimuli = 'McGr2'
n300_eff <- run_one_sim(simulation_parameters, pop_df, summary=TRUE)
pdf.pgb(n300_eff, fname='n300_ex_plot.pdf')
analyze_sim_results(n300)
# scratch arithmetic typed at the console (kept verbatim from the history)
5*.33 + 13*.67
analyze_sim_results(n30)
# inspect previously saved power-curve CSVs
read.csv(file='power_results_increasing_delta.csv')
read.csv(file='power_results_increasing_stimuli.csv')
1.588786
read.csv(file='power_results_increasing_stimuli2.csv')
1.609467
read.csv(file='power_results_increasing_delta.csv')
# load all CSVs in the output directory into a named list of data frames
power_curves <- list.files(pattern='csv') %>% sort %>% sapply(read.csv, USE.NAMES=TRUE, simplify=FALSE)
names(power_curves)
# combine the separate simulations into a single file
inc_delta <- combine_pc(power_curves[[1]], power_curves[[2]])
inc_stim <- combine_pc(power_curves[[3]], power_curves[[4]])
inc_trial <- combine_pc(power_curves[[5]], power_curves[[6]])
# add n_stim==1 to pc2 and n_trials==10 to pc3
inc_stim <- rbind(inc_delta %>% subset((.)$pop_eff == 0.1), inc_stim)
inc_trial <- rbind(inc_delta %>% subset((.)$pop_eff == 0.1), inc_trial)
inc_stim
(1.6 - 1.8) / 1.8
# spread of the effect estimates (column 2) across simulations
sd(n30[,2])
sd(n300[,2])
mean(n300[,2])
mean(n30[,2])
# approximate 95% ranges: mean +/- 2 sd
sd(n300[,2])*2
sd(n30[,2])*2
10 + 26
10 - 26
analyze_sim_results(n30)
analyze_sim_results

