In this set of simulations, we consider settings with both null and non-null tests and a varying proportion of null tests. An informative covariate is included in each setting, as described in simulations-informative-sine.Rmd. The simulations are identical to those performed in simulations-varyingpi0.Rmd, except that the strength of the signal (the effect size for non-null tests) is weaker.
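For intuition, the sketch below shows one way a sine-shaped relationship between a covariate on [0, 1] and the probability that a test is null could be parameterized. The exact relationship used in these simulations is defined by the `pi0_sine()` helper sourced from `../R`; the function below (`sine_pi0_sketch()`) and its amplitude are purely illustrative assumptions.

```r
## Illustrative only: the actual mapping is defined by the pi0_sine() helper in
## ../R. This hypothetical sketch maps a covariate in [0, 1] to a null
## probability that oscillates around an overall null rate `pi0bar`.
sine_pi0_sketch <- function(pi0bar) {
    function(x) {
        ## oscillate around the target null proportion, clipped to [0, 1]
        pmin(pmax(pi0bar + (1 - pi0bar) * 0.5 * sin(2 * pi * x), 0), 1)
    }
}

## visualize the hypothetical covariate / null-probability relationship
curve(sine_pi0_sketch(0.5)(x), from = 0, to = 1,
      xlab = "independent covariate", ylab = "P(test is null)")
```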
```r
library(dplyr)
library(ggplot2)
library(SummarizedBenchmark)
library(parallel)

## load helper functions
for (f in list.files("../R", "\\.(r|R)$", full.names = TRUE)) {
    source(f)
}

## project data/results folders
resdir <- "results"
dir.create(resdir, showWarnings = FALSE, recursive = TRUE)

## intermediary files we create below
null05_file <- file.path(resdir, "varyingpi0-t-benchmark-nullprop05.rds")
null10_file <- file.path(resdir, "varyingpi0-t-benchmark-nullprop10.rds")
null20_file <- file.path(resdir, "varyingpi0-t-benchmark-nullprop20.rds")
null30_file <- file.path(resdir, "varyingpi0-t-benchmark-nullprop30.rds")
null40_file <- file.path(resdir, "varyingpi0-t-benchmark-nullprop40.rds")
null50_file <- file.path(resdir, "varyingpi0-t-benchmark-nullprop50.rds")
null60_file <- file.path(resdir, "varyingpi0-t-benchmark-nullprop60.rds")
null70_file <- file.path(resdir, "varyingpi0-t-benchmark-nullprop70.rds")
null80_file <- file.path(resdir, "varyingpi0-t-benchmark-nullprop80.rds")
null90_file <- file.path(resdir, "varyingpi0-t-benchmark-nullprop90.rds")
null95_file <- file.path(resdir, "varyingpi0-t-benchmark-nullprop95.rds")
null99_file <- file.path(resdir, "varyingpi0-t-benchmark-nullprop99.rds")

## number of cores for parallelization
cores <- 20
B <- 100

## define benchmarking design
bd <- initializeBenchDesign()
```
As described in simulations-null.Rmd, we include Scott's FDR Regression in the analysis for simulations with Gaussian noise. Again, we include both `nulltype = "empirical"` and `nulltype = "theoretical"`. Since all settings in this series of simulations use test statistics simulated with Gaussian noise, we include Scott's FDR Regression in all of the comparisons.
```r
bdplus <- bd
bdplus <- addBMethod(bdplus, "fdrreg-t",
                     FDRreg::FDRreg,
                     function(x) { x$FDR },
                     z = test_statistic,
                     features = model.matrix( ~ splines::bs(ind_covariate, df = 3) - 1),
                     nulltype = 'theoretical',
                     control = list(lambda = 0.01))
bdplus <- addBMethod(bdplus, "fdrreg-e",
                     FDRreg::FDRreg,
                     function(x) { x$FDR },
                     z = test_statistic,
                     features = model.matrix( ~ splines::bs(ind_covariate, df = 3) - 1),
                     nulltype = 'empirical',
                     control = list(lambda = 0.01))
```
All simulation settings will share the following parameters.
```r
m <- 20000                      # integer: number of hypothesis tests
es_dist <- rnorm_generator(2)   # functional: dist of alternative test stats
ts_dist <- rt_perturber(11)     # functional: sampling dist/noise for test stats
null_dist <- rt_2pvaluer(11)    # functional: dist to calc p-values
icovariate <- runif             # functional: independent covariate
```
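The distribution arguments above are factory functions defined in the helper scripts sourced from `../R`. As a rough sketch of that pattern (not the actual implementations), hypothetical versions of `rnorm_generator()` and `rt_2pvaluer()` might look like the following.

```r
## Hypothetical sketches of the factory pattern used by the ../R helpers;
## the real rnorm_generator(), rt_perturber(), and rt_2pvaluer() may differ.
rnorm_generator_sketch <- function(mean) {
    ## returns a function that draws `n` effect sizes from N(mean, 1)
    function(n) rnorm(n, mean = mean)
}

rt_2pvaluer_sketch <- function(df) {
    ## returns a function mapping test statistics to two-sided p-values
    ## under a t reference distribution with `df` degrees of freedom
    function(stat) 2 * pt(abs(stat), df = df, lower.tail = FALSE)
}

es_sketch <- rnorm_generator_sketch(2)
head(es_sketch(5))                       # five draws of non-null effect sizes
rt_2pvaluer_sketch(11)(c(-2.5, 0, 2.5))  # p-values for example statistics
```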
Simulation results will be presented excluding a subset of methods, and for certain plots (upset plots), a single alpha cutoff will be used.
```r
excludeSet <- c("unadjusted", "bl-df02", "bl-df04", "bl-df05")
ualpha <- 0.05
```
First, we consider the setting where 5% of tests are null.
```r
pi0 <- pi0_sine(0.05)   # numeric: proportion of null hypotheses
seed <- 1010
```

We next run the simulations.

```r
if (file.exists(null05_file)) {
    res <- readRDS(null05_file)
} else {
    res <- mclapply(X = 1:B, FUN = simIteration, bench = bdplus, m = m, pi0 = pi0,
                    es_dist = es_dist, icovariate = icovariate, ts_dist = ts_dist,
                    null_dist = null_dist, seed = seed, mc.cores = cores)
    saveRDS(res, file = null05_file)
}
res_i <- lapply(res, `[[`, "informative")
res_u <- lapply(res, `[[`, "uninformative")
```

Here, we show the relationship between the independent covariate and p-values for a single replication of the experiment.

```r
onerun <- simIteration(bdplus, m = m, pi0 = pi0, es_dist = es_dist, ts_dist = ts_dist,
                       icovariate = icovariate, null_dist = null_dist, execute = FALSE)

rank_scatter(onerun, pvalue = "pval", covariate = "ind_covariate")

strat_hist(onerun, pvalue = "pval", covariate = "ind_covariate", maxy = 10, numQ = 3)
```

We plot the averaged results across `r B` replications.

```r
resdf <- plotsim_standardize(res_i, alpha = seq(0.01, 0.10, 0.01))

plotsim_average(resdf, met = "rejections", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "FDR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "TPR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "TNR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
```

We also take a look at the distribution of rejections for each method as a function of the effect size and independent covariate.

```r
covariateLinePlot(res_i, alpha = ualpha, covname = "effect_size")
covariateLinePlot(res_i, alpha = ualpha, covname = "ind_covariate")
```

Finally (if enough methods produce rejections at `r ualpha`), we take a look at the overlap of rejections between methods.

```r
if (numberMethodsReject(resdf, alphacutoff = ualpha, filterSet = excludeSet) >= 3) {
    aggupset(res_i, alpha = ualpha, supplementary = FALSE, return_list = FALSE)
} else {
    message("Not enough methods found rejections at alpha ", ualpha, "; skipping upset plot")
}
```

We also compare the simulation results with and without an informative covariate.

```r
resdfu <- plotsim_standardize(res_u, alpha = seq(0.01, 0.10, 0.01))

resdfiu <- dplyr::full_join(select(resdf, rep, blabel, param.alpha, key, performanceMetric, alpha, value),
                            select(resdfu, rep, blabel, param.alpha, key, performanceMetric, alpha, value),
                            by = c("rep", "blabel", "param.alpha", "key", "performanceMetric", "alpha"),
                            suffix = c(".info", ".uninfo"))
resdfiu <- dplyr::mutate(resdfiu, value = value.info - value.uninfo)

plotsim_average(resdfiu, met = "rejections", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "FDR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "TPR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "TNR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
```
Next, we consider the setting where 10% of tests are null.
```r
pi0 <- pi0_sine(0.10)   # numeric: proportion of null hypotheses
seed <- 1201
```

We next run the simulations.

```r
if (file.exists(null10_file)) {
    res <- readRDS(null10_file)
} else {
    res <- mclapply(X = 1:B, FUN = simIteration, bench = bdplus, m = m, pi0 = pi0,
                    es_dist = es_dist, icovariate = icovariate, ts_dist = ts_dist,
                    null_dist = null_dist, seed = seed, mc.cores = cores)
    saveRDS(res, file = null10_file)
}
res_i <- lapply(res, `[[`, "informative")
res_u <- lapply(res, `[[`, "uninformative")
```

Here, we show the relationship between the independent covariate and p-values for a single replication of the experiment.

```r
onerun <- simIteration(bdplus, m = m, pi0 = pi0, es_dist = es_dist, ts_dist = ts_dist,
                       icovariate = icovariate, null_dist = null_dist, execute = FALSE)

rank_scatter(onerun, pvalue = "pval", covariate = "ind_covariate")

strat_hist(onerun, pvalue = "pval", covariate = "ind_covariate", maxy = 10, numQ = 3)
```

We plot the averaged results across `r B` replications.

```r
resdf <- plotsim_standardize(res_i, alpha = seq(0.01, 0.10, 0.01))

plotsim_average(resdf, met = "rejections", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "FDR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "TPR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "TNR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
```

We also take a look at the distribution of rejections for each method as a function of the effect size and independent covariate.

```r
covariateLinePlot(res_i, alpha = ualpha, covname = "effect_size")
covariateLinePlot(res_i, alpha = ualpha, covname = "ind_covariate")
```

Finally (if enough methods produce rejections at `r ualpha`), we take a look at the overlap of rejections between methods.

```r
if (numberMethodsReject(resdf, alphacutoff = ualpha, filterSet = excludeSet) >= 3) {
    aggupset(res_i, alpha = ualpha, supplementary = FALSE, return_list = FALSE)
} else {
    message("Not enough methods found rejections at alpha ", ualpha, "; skipping upset plot")
}
```

We also compare the simulation results with and without an informative covariate.

```r
resdfu <- plotsim_standardize(res_u, alpha = seq(0.01, 0.10, 0.01))

resdfiu <- dplyr::full_join(select(resdf, rep, blabel, param.alpha, key, performanceMetric, alpha, value),
                            select(resdfu, rep, blabel, param.alpha, key, performanceMetric, alpha, value),
                            by = c("rep", "blabel", "param.alpha", "key", "performanceMetric", "alpha"),
                            suffix = c(".info", ".uninfo"))
resdfiu <- dplyr::mutate(resdfiu, value = value.info - value.uninfo)

plotsim_average(resdfiu, met = "rejections", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "FDR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "TPR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "TNR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
```
Next, we consider the setting where 20% of tests are null.
```r
pi0 <- pi0_sine(0.20)   # numeric: proportion of null hypotheses
seed <- 51
```

We next run the simulations.

```r
if (file.exists(null20_file)) {
    res <- readRDS(null20_file)
} else {
    res <- mclapply(X = 1:B, FUN = simIteration, bench = bdplus, m = m, pi0 = pi0,
                    es_dist = es_dist, icovariate = icovariate, ts_dist = ts_dist,
                    null_dist = null_dist, seed = seed, mc.cores = cores)
    saveRDS(res, file = null20_file)
}
res_i <- lapply(res, `[[`, "informative")
res_u <- lapply(res, `[[`, "uninformative")
```

Here, we show the relationship between the independent covariate and p-values for a single replication of the experiment.

```r
onerun <- simIteration(bdplus, m = m, pi0 = pi0, es_dist = es_dist, ts_dist = ts_dist,
                       icovariate = icovariate, null_dist = null_dist, execute = FALSE)

rank_scatter(onerun, pvalue = "pval", covariate = "ind_covariate")

strat_hist(onerun, pvalue = "pval", covariate = "ind_covariate", maxy = 10, numQ = 3)
```

We plot the averaged results across `r B` replications.

```r
resdf <- plotsim_standardize(res_i, alpha = seq(0.01, 0.10, 0.01))

plotsim_average(resdf, met = "rejections", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "FDR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "TPR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "TNR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
```

We also take a look at the distribution of rejections for each method as a function of the effect size and independent covariate.

```r
covariateLinePlot(res_i, alpha = ualpha, covname = "effect_size")
covariateLinePlot(res_i, alpha = ualpha, covname = "ind_covariate")
```

Finally (if enough methods produce rejections at `r ualpha`), we take a look at the overlap of rejections between methods.

```r
if (numberMethodsReject(resdf, alphacutoff = ualpha, filterSet = excludeSet) >= 3) {
    aggupset(res_i, alpha = ualpha, supplementary = FALSE, return_list = FALSE)
} else {
    message("Not enough methods found rejections at alpha ", ualpha, "; skipping upset plot")
}
```

We also compare the simulation results with and without an informative covariate.

```r
resdfu <- plotsim_standardize(res_u, alpha = seq(0.01, 0.10, 0.01))

resdfiu <- dplyr::full_join(select(resdf, rep, blabel, param.alpha, key, performanceMetric, alpha, value),
                            select(resdfu, rep, blabel, param.alpha, key, performanceMetric, alpha, value),
                            by = c("rep", "blabel", "param.alpha", "key", "performanceMetric", "alpha"),
                            suffix = c(".info", ".uninfo"))
resdfiu <- dplyr::mutate(resdfiu, value = value.info - value.uninfo)

plotsim_average(resdfiu, met = "rejections", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "FDR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "TPR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "TNR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
```
Next, we consider the setting where 30% of tests are null.
```r
pi0 <- pi0_sine(0.30)   # numeric: proportion of null hypotheses
seed <- 511
```

We next run the simulations.

```r
if (file.exists(null30_file)) {
    res <- readRDS(null30_file)
} else {
    res <- mclapply(X = 1:B, FUN = simIteration, bench = bdplus, m = m, pi0 = pi0,
                    es_dist = es_dist, icovariate = icovariate, ts_dist = ts_dist,
                    null_dist = null_dist, seed = seed, mc.cores = cores)
    saveRDS(res, file = null30_file)
}
res_i <- lapply(res, `[[`, "informative")
res_u <- lapply(res, `[[`, "uninformative")
```

Here, we show the relationship between the independent covariate and p-values for a single replication of the experiment.

```r
onerun <- simIteration(bdplus, m = m, pi0 = pi0, es_dist = es_dist, ts_dist = ts_dist,
                       icovariate = icovariate, null_dist = null_dist, execute = FALSE)

rank_scatter(onerun, pvalue = "pval", covariate = "ind_covariate")

strat_hist(onerun, pvalue = "pval", covariate = "ind_covariate", maxy = 10, numQ = 3)
```

We plot the averaged results across `r B` replications.

```r
resdf <- plotsim_standardize(res_i, alpha = seq(0.01, 0.10, 0.01))

plotsim_average(resdf, met = "rejections", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "FDR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "TPR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "TNR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
```

We also take a look at the distribution of rejections for each method as a function of the effect size and independent covariate.

```r
covariateLinePlot(res_i, alpha = ualpha, covname = "effect_size")
covariateLinePlot(res_i, alpha = ualpha, covname = "ind_covariate")
```

Finally (if enough methods produce rejections at `r ualpha`), we take a look at the overlap of rejections between methods.

```r
if (numberMethodsReject(resdf, alphacutoff = ualpha, filterSet = excludeSet) >= 3) {
    aggupset(res_i, alpha = ualpha, supplementary = FALSE, return_list = FALSE)
} else {
    message("Not enough methods found rejections at alpha ", ualpha, "; skipping upset plot")
}
```

We also compare the simulation results with and without an informative covariate.

```r
resdfu <- plotsim_standardize(res_u, alpha = seq(0.01, 0.10, 0.01))

resdfiu <- dplyr::full_join(select(resdf, rep, blabel, param.alpha, key, performanceMetric, alpha, value),
                            select(resdfu, rep, blabel, param.alpha, key, performanceMetric, alpha, value),
                            by = c("rep", "blabel", "param.alpha", "key", "performanceMetric", "alpha"),
                            suffix = c(".info", ".uninfo"))
resdfiu <- dplyr::mutate(resdfiu, value = value.info - value.uninfo)

plotsim_average(resdfiu, met = "rejections", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "FDR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "TPR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "TNR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
```
Next, we consider the setting where 40% of tests are null.
```r
pi0 <- pi0_sine(0.40)   # numeric: proportion of null hypotheses
seed <- 515
```

We next run the simulations.

```r
if (file.exists(null40_file)) {
    res <- readRDS(null40_file)
} else {
    res <- mclapply(X = 1:B, FUN = simIteration, bench = bdplus, m = m, pi0 = pi0,
                    es_dist = es_dist, icovariate = icovariate, ts_dist = ts_dist,
                    null_dist = null_dist, seed = seed, mc.cores = cores)
    saveRDS(res, file = null40_file)
}
res_i <- lapply(res, `[[`, "informative")
res_u <- lapply(res, `[[`, "uninformative")
```

Here, we show the relationship between the independent covariate and p-values for a single replication of the experiment.

```r
onerun <- simIteration(bdplus, m = m, pi0 = pi0, es_dist = es_dist, ts_dist = ts_dist,
                       icovariate = icovariate, null_dist = null_dist, execute = FALSE)

rank_scatter(onerun, pvalue = "pval", covariate = "ind_covariate")

strat_hist(onerun, pvalue = "pval", covariate = "ind_covariate", maxy = 10, numQ = 3)
```

We plot the averaged results across `r B` replications.

```r
resdf <- plotsim_standardize(res_i, alpha = seq(0.01, 0.10, 0.01))

plotsim_average(resdf, met = "rejections", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "FDR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "TPR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "TNR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
```

We also take a look at the distribution of rejections for each method as a function of the effect size and independent covariate.

```r
covariateLinePlot(res_i, alpha = ualpha, covname = "effect_size")
covariateLinePlot(res_i, alpha = ualpha, covname = "ind_covariate")
```

Finally (if enough methods produce rejections at `r ualpha`), we take a look at the overlap of rejections between methods.

```r
if (numberMethodsReject(resdf, alphacutoff = ualpha, filterSet = excludeSet) >= 3) {
    aggupset(res_i, alpha = ualpha, supplementary = FALSE, return_list = FALSE)
} else {
    message("Not enough methods found rejections at alpha ", ualpha, "; skipping upset plot")
}
```

We also compare the simulation results with and without an informative covariate.

```r
resdfu <- plotsim_standardize(res_u, alpha = seq(0.01, 0.10, 0.01))

resdfiu <- dplyr::full_join(select(resdf, rep, blabel, param.alpha, key, performanceMetric, alpha, value),
                            select(resdfu, rep, blabel, param.alpha, key, performanceMetric, alpha, value),
                            by = c("rep", "blabel", "param.alpha", "key", "performanceMetric", "alpha"),
                            suffix = c(".info", ".uninfo"))
resdfiu <- dplyr::mutate(resdfiu, value = value.info - value.uninfo)

plotsim_average(resdfiu, met = "rejections", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "FDR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "TPR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "TNR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
```
Next, we consider the setting where 50% of tests are null.
```r
pi0 <- pi0_sine(0.50)   # numeric: proportion of null hypotheses
seed <- 500
```

We next run the simulations.

```r
if (file.exists(null50_file)) {
    res <- readRDS(null50_file)
} else {
    res <- mclapply(X = 1:B, FUN = simIteration, bench = bdplus, m = m, pi0 = pi0,
                    es_dist = es_dist, icovariate = icovariate, ts_dist = ts_dist,
                    null_dist = null_dist, seed = seed, mc.cores = cores)
    saveRDS(res, file = null50_file)
}
res_i <- lapply(res, `[[`, "informative")
res_u <- lapply(res, `[[`, "uninformative")
```

Here, we show the relationship between the independent covariate and p-values for a single replication of the experiment.

```r
onerun <- simIteration(bdplus, m = m, pi0 = pi0, es_dist = es_dist, ts_dist = ts_dist,
                       icovariate = icovariate, null_dist = null_dist, execute = FALSE)

rank_scatter(onerun, pvalue = "pval", covariate = "ind_covariate")

strat_hist(onerun, pvalue = "pval", covariate = "ind_covariate", maxy = 10, numQ = 3)
```

We plot the averaged results across `r B` replications.

```r
resdf <- plotsim_standardize(res_i, alpha = seq(0.01, 0.10, 0.01))

plotsim_average(resdf, met = "rejections", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "FDR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "TPR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "TNR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
```

We also take a look at the distribution of rejections for each method as a function of the effect size and independent covariate.

```r
covariateLinePlot(res_i, alpha = ualpha, covname = "effect_size")
covariateLinePlot(res_i, alpha = ualpha, covname = "ind_covariate")
```

Finally (if enough methods produce rejections at `r ualpha`), we take a look at the overlap of rejections between methods.

```r
if (numberMethodsReject(resdf, alphacutoff = ualpha, filterSet = excludeSet) >= 3) {
    aggupset(res_i, alpha = ualpha, supplementary = FALSE, return_list = FALSE)
} else {
    message("Not enough methods found rejections at alpha ", ualpha, "; skipping upset plot")
}
```

We also compare the simulation results with and without an informative covariate.

```r
resdfu <- plotsim_standardize(res_u, alpha = seq(0.01, 0.10, 0.01))

resdfiu <- dplyr::full_join(select(resdf, rep, blabel, param.alpha, key, performanceMetric, alpha, value),
                            select(resdfu, rep, blabel, param.alpha, key, performanceMetric, alpha, value),
                            by = c("rep", "blabel", "param.alpha", "key", "performanceMetric", "alpha"),
                            suffix = c(".info", ".uninfo"))
resdfiu <- dplyr::mutate(resdfiu, value = value.info - value.uninfo)

plotsim_average(resdfiu, met = "rejections", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "FDR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "TPR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "TNR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
```
Next, we consider the setting where 60% of tests are null.
```r
pi0 <- pi0_sine(0.60)   # numeric: proportion of null hypotheses
seed <- 1502
```

We next run the simulations.

```r
if (file.exists(null60_file)) {
    res <- readRDS(null60_file)
} else {
    res <- mclapply(X = 1:B, FUN = simIteration, bench = bdplus, m = m, pi0 = pi0,
                    es_dist = es_dist, icovariate = icovariate, ts_dist = ts_dist,
                    null_dist = null_dist, seed = seed, mc.cores = cores)
    saveRDS(res, file = null60_file)
}
res_i <- lapply(res, `[[`, "informative")
res_u <- lapply(res, `[[`, "uninformative")
```

Here, we show the relationship between the independent covariate and p-values for a single replication of the experiment.

```r
onerun <- simIteration(bdplus, m = m, pi0 = pi0, es_dist = es_dist, ts_dist = ts_dist,
                       icovariate = icovariate, null_dist = null_dist, execute = FALSE)

rank_scatter(onerun, pvalue = "pval", covariate = "ind_covariate")

strat_hist(onerun, pvalue = "pval", covariate = "ind_covariate", maxy = 10, numQ = 3)
```

We plot the averaged results across `r B` replications.

```r
resdf <- plotsim_standardize(res_i, alpha = seq(0.01, 0.10, 0.01))

plotsim_average(resdf, met = "rejections", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "FDR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "TPR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "TNR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
```

We also take a look at the distribution of rejections for each method as a function of the effect size and independent covariate.

```r
covariateLinePlot(res_i, alpha = ualpha, covname = "effect_size")
covariateLinePlot(res_i, alpha = ualpha, covname = "ind_covariate")
```

Finally (if enough methods produce rejections at `r ualpha`), we take a look at the overlap of rejections between methods.

```r
if (numberMethodsReject(resdf, alphacutoff = ualpha, filterSet = excludeSet) >= 3) {
    aggupset(res_i, alpha = ualpha, supplementary = FALSE, return_list = FALSE)
} else {
    message("Not enough methods found rejections at alpha ", ualpha, "; skipping upset plot")
}
```

We also compare the simulation results with and without an informative covariate.

```r
resdfu <- plotsim_standardize(res_u, alpha = seq(0.01, 0.10, 0.01))

resdfiu <- dplyr::full_join(select(resdf, rep, blabel, param.alpha, key, performanceMetric, alpha, value),
                            select(resdfu, rep, blabel, param.alpha, key, performanceMetric, alpha, value),
                            by = c("rep", "blabel", "param.alpha", "key", "performanceMetric", "alpha"),
                            suffix = c(".info", ".uninfo"))
resdfiu <- dplyr::mutate(resdfiu, value = value.info - value.uninfo)

plotsim_average(resdfiu, met = "rejections", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "FDR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "TPR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "TNR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
```
Next, we consider the setting where 70% of tests are null.
```r
pi0 <- pi0_sine(0.70)   # numeric: proportion of null hypotheses
seed <- 1722
```

We next run the simulations.

```r
if (file.exists(null70_file)) {
    res <- readRDS(null70_file)
} else {
    res <- mclapply(X = 1:B, FUN = simIteration, bench = bdplus, m = m, pi0 = pi0,
                    es_dist = es_dist, icovariate = icovariate, ts_dist = ts_dist,
                    null_dist = null_dist, seed = seed, mc.cores = cores)
    saveRDS(res, file = null70_file)
}
res_i <- lapply(res, `[[`, "informative")
res_u <- lapply(res, `[[`, "uninformative")
```

Here, we show the relationship between the independent covariate and p-values for a single replication of the experiment.

```r
onerun <- simIteration(bdplus, m = m, pi0 = pi0, es_dist = es_dist, ts_dist = ts_dist,
                       icovariate = icovariate, null_dist = null_dist, execute = FALSE)

rank_scatter(onerun, pvalue = "pval", covariate = "ind_covariate")

strat_hist(onerun, pvalue = "pval", covariate = "ind_covariate", maxy = 10, numQ = 3)
```

We plot the averaged results across `r B` replications.

```r
resdf <- plotsim_standardize(res_i, alpha = seq(0.01, 0.10, 0.01))

plotsim_average(resdf, met = "rejections", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "FDR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "TPR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "TNR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
```

We also take a look at the distribution of rejections for each method as a function of the effect size and independent covariate.

```r
covariateLinePlot(res_i, alpha = ualpha, covname = "effect_size")
covariateLinePlot(res_i, alpha = ualpha, covname = "ind_covariate")
```

Finally (if enough methods produce rejections at `r ualpha`), we take a look at the overlap of rejections between methods.

```r
if (numberMethodsReject(resdf, alphacutoff = ualpha, filterSet = excludeSet) >= 3) {
    aggupset(res_i, alpha = ualpha, supplementary = FALSE, return_list = FALSE)
} else {
    message("Not enough methods found rejections at alpha ", ualpha, "; skipping upset plot")
}
```

We also compare the simulation results with and without an informative covariate.

```r
resdfu <- plotsim_standardize(res_u, alpha = seq(0.01, 0.10, 0.01))

resdfiu <- dplyr::full_join(select(resdf, rep, blabel, param.alpha, key, performanceMetric, alpha, value),
                            select(resdfu, rep, blabel, param.alpha, key, performanceMetric, alpha, value),
                            by = c("rep", "blabel", "param.alpha", "key", "performanceMetric", "alpha"),
                            suffix = c(".info", ".uninfo"))
resdfiu <- dplyr::mutate(resdfiu, value = value.info - value.uninfo)

plotsim_average(resdfiu, met = "rejections", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "FDR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "TPR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "TNR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
```
Next, we consider the setting where 80% of tests are null.
```r
pi0 <- pi0_sine(0.80)   # numeric: proportion of null hypotheses
seed <- 608
```

We next run the simulations.

```r
if (file.exists(null80_file)) {
    res <- readRDS(null80_file)
} else {
    res <- mclapply(X = 1:B, FUN = simIteration, bench = bdplus, m = m, pi0 = pi0,
                    es_dist = es_dist, icovariate = icovariate, ts_dist = ts_dist,
                    null_dist = null_dist, seed = seed, mc.cores = cores)
    saveRDS(res, file = null80_file)
}
res_i <- lapply(res, `[[`, "informative")
res_u <- lapply(res, `[[`, "uninformative")
```

Here, we show the relationship between the independent covariate and p-values for a single replication of the experiment.

```r
onerun <- simIteration(bdplus, m = m, pi0 = pi0, es_dist = es_dist, ts_dist = ts_dist,
                       icovariate = icovariate, null_dist = null_dist, execute = FALSE)

rank_scatter(onerun, pvalue = "pval", covariate = "ind_covariate")

strat_hist(onerun, pvalue = "pval", covariate = "ind_covariate", maxy = 10, numQ = 3)
```

We plot the averaged results across `r B` replications.

```r
resdf <- plotsim_standardize(res_i, alpha = seq(0.01, 0.10, 0.01))

plotsim_average(resdf, met = "rejections", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "FDR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "TPR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "TNR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
```

We also take a look at the distribution of rejections for each method as a function of the effect size and independent covariate.

```r
covariateLinePlot(res_i, alpha = ualpha, covname = "effect_size")
covariateLinePlot(res_i, alpha = ualpha, covname = "ind_covariate")
```

Finally (if enough methods produce rejections at `r ualpha`), we take a look at the overlap of rejections between methods.

```r
if (numberMethodsReject(resdf, alphacutoff = ualpha, filterSet = excludeSet) >= 3) {
    aggupset(res_i, alpha = ualpha, supplementary = FALSE, return_list = FALSE)
} else {
    message("Not enough methods found rejections at alpha ", ualpha, "; skipping upset plot")
}
```

We also compare the simulation results with and without an informative covariate.

```r
resdfu <- plotsim_standardize(res_u, alpha = seq(0.01, 0.10, 0.01))

resdfiu <- dplyr::full_join(select(resdf, rep, blabel, param.alpha, key, performanceMetric, alpha, value),
                            select(resdfu, rep, blabel, param.alpha, key, performanceMetric, alpha, value),
                            by = c("rep", "blabel", "param.alpha", "key", "performanceMetric", "alpha"),
                            suffix = c(".info", ".uninfo"))
resdfiu <- dplyr::mutate(resdfiu, value = value.info - value.uninfo)

plotsim_average(resdfiu, met = "rejections", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "FDR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "TPR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "TNR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
```
Next, we consider the setting where 90% of tests are null.
```r
pi0 <- pi0_sine(0.90)   # numeric: proportion of null hypotheses
seed <- 808
```

We next run the simulations.

```r
if (file.exists(null90_file)) {
    res <- readRDS(null90_file)
} else {
    res <- mclapply(X = 1:B, FUN = simIteration, bench = bdplus, m = m, pi0 = pi0,
                    es_dist = es_dist, icovariate = icovariate, ts_dist = ts_dist,
                    null_dist = null_dist, seed = seed, mc.cores = cores)
    saveRDS(res, file = null90_file)
}
res_i <- lapply(res, `[[`, "informative")
res_u <- lapply(res, `[[`, "uninformative")
```

Here, we show the relationship between the independent covariate and p-values for a single replication of the experiment.

```r
onerun <- simIteration(bdplus, m = m, pi0 = pi0, es_dist = es_dist, ts_dist = ts_dist,
                       icovariate = icovariate, null_dist = null_dist, execute = FALSE)

rank_scatter(onerun, pvalue = "pval", covariate = "ind_covariate")

strat_hist(onerun, pvalue = "pval", covariate = "ind_covariate", maxy = 10, numQ = 3)
```

We plot the averaged results across `r B` replications.

```r
resdf <- plotsim_standardize(res_i, alpha = seq(0.01, 0.10, 0.01))

plotsim_average(resdf, met = "rejections", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "FDR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "TPR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "TNR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
```

We also take a look at the distribution of rejections for each method as a function of the effect size and independent covariate.

```r
covariateLinePlot(res_i, alpha = ualpha, covname = "effect_size")
covariateLinePlot(res_i, alpha = ualpha, covname = "ind_covariate")
```

Finally (if enough methods produce rejections at `r ualpha`), we take a look at the overlap of rejections between methods.

```r
if (numberMethodsReject(resdf, alphacutoff = ualpha, filterSet = excludeSet) >= 3) {
    aggupset(res_i, alpha = ualpha, supplementary = FALSE, return_list = FALSE)
} else {
    message("Not enough methods found rejections at alpha ", ualpha, "; skipping upset plot")
}
```

We also compare the simulation results with and without an informative covariate.

```r
resdfu <- plotsim_standardize(res_u, alpha = seq(0.01, 0.10, 0.01))

resdfiu <- dplyr::full_join(select(resdf, rep, blabel, param.alpha, key, performanceMetric, alpha, value),
                            select(resdfu, rep, blabel, param.alpha, key, performanceMetric, alpha, value),
                            by = c("rep", "blabel", "param.alpha", "key", "performanceMetric", "alpha"),
                            suffix = c(".info", ".uninfo"))
resdfiu <- dplyr::mutate(resdfiu, value = value.info - value.uninfo)

plotsim_average(resdfiu, met = "rejections", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "FDR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "TPR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "TNR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
```
Next, we consider the setting where 95% of tests are null.
```r
pi0 <- pi0_sine(0.95)   # numeric: proportion of null hypotheses
seed <- 913
```

We next run the simulations.

```r
if (file.exists(null95_file)) {
    res <- readRDS(null95_file)
} else {
    res <- mclapply(X = 1:B, FUN = simIteration, bench = bdplus, m = m, pi0 = pi0,
                    es_dist = es_dist, icovariate = icovariate, ts_dist = ts_dist,
                    null_dist = null_dist, seed = seed, mc.cores = cores)
    saveRDS(res, file = null95_file)
}
res_i <- lapply(res, `[[`, "informative")
res_u <- lapply(res, `[[`, "uninformative")
```

Here, we show the relationship between the independent covariate and p-values for a single replication of the experiment.

```r
onerun <- simIteration(bdplus, m = m, pi0 = pi0, es_dist = es_dist, ts_dist = ts_dist,
                       icovariate = icovariate, null_dist = null_dist, execute = FALSE)

rank_scatter(onerun, pvalue = "pval", covariate = "ind_covariate")

strat_hist(onerun, pvalue = "pval", covariate = "ind_covariate", maxy = 10, numQ = 3)
```

We plot the averaged results across `r B` replications.

```r
resdf <- plotsim_standardize(res_i, alpha = seq(0.01, 0.10, 0.01))

plotsim_average(resdf, met = "rejections", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "FDR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "TPR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "TNR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
```

We also take a look at the distribution of rejections for each method as a function of the effect size and independent covariate.

```r
covariateLinePlot(res_i, alpha = ualpha, covname = "effect_size")
covariateLinePlot(res_i, alpha = ualpha, covname = "ind_covariate")
```

Finally (if enough methods produce rejections at `r ualpha`), we take a look at the overlap of rejections between methods.

```r
if (numberMethodsReject(resdf, alphacutoff = ualpha, filterSet = excludeSet) >= 3) {
    aggupset(res_i, alpha = ualpha, supplementary = FALSE, return_list = FALSE)
} else {
    message("Not enough methods found rejections at alpha ", ualpha, "; skipping upset plot")
}
```

We also compare the simulation results with and without an informative covariate.

```r
resdfu <- plotsim_standardize(res_u, alpha = seq(0.01, 0.10, 0.01))

resdfiu <- dplyr::full_join(select(resdf, rep, blabel, param.alpha, key, performanceMetric, alpha, value),
                            select(resdfu, rep, blabel, param.alpha, key, performanceMetric, alpha, value),
                            by = c("rep", "blabel", "param.alpha", "key", "performanceMetric", "alpha"),
                            suffix = c(".info", ".uninfo"))
resdfiu <- dplyr::mutate(resdfiu, value = value.info - value.uninfo)

plotsim_average(resdfiu, met = "rejections", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "FDR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "TPR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "TNR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
```
Next, we consider the setting where 99% of tests are null.
```r
pi0 <- pi0_sine(0.99)   # numeric: proportion of null hypotheses
seed <- 2015
```

We next run the simulations.

```r
if (file.exists(null99_file)) {
    res <- readRDS(null99_file)
} else {
    res <- mclapply(X = 1:B, FUN = simIteration, bench = bdplus, m = m, pi0 = pi0,
                    es_dist = es_dist, icovariate = icovariate, ts_dist = ts_dist,
                    null_dist = null_dist, seed = seed, mc.cores = cores)
    saveRDS(res, file = null99_file)
}
res_i <- lapply(res, `[[`, "informative")
res_u <- lapply(res, `[[`, "uninformative")
```

Here, we show the relationship between the independent covariate and p-values for a single replication of the experiment.

```r
onerun <- simIteration(bdplus, m = m, pi0 = pi0, es_dist = es_dist, ts_dist = ts_dist,
                       icovariate = icovariate, null_dist = null_dist, execute = FALSE)

rank_scatter(onerun, pvalue = "pval", covariate = "ind_covariate")

strat_hist(onerun, pvalue = "pval", covariate = "ind_covariate", maxy = 10, numQ = 3)
```

We plot the averaged results across `r B` replications.

```r
resdf <- plotsim_standardize(res_i, alpha = seq(0.01, 0.10, 0.01))

plotsim_average(resdf, met = "rejections", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "FDR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "TPR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
plotsim_average(resdf, met = "TNR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE)
```

We also take a look at the distribution of rejections for each method as a function of the effect size and independent covariate.

```r
covariateLinePlot(res_i, alpha = ualpha, covname = "effect_size")
covariateLinePlot(res_i, alpha = ualpha, covname = "ind_covariate")
```

Finally (if enough methods produce rejections at `r ualpha`), we take a look at the overlap of rejections between methods.

```r
if (numberMethodsReject(resdf, alphacutoff = ualpha, filterSet = excludeSet) >= 3) {
    aggupset(res_i, alpha = ualpha, supplementary = FALSE, return_list = FALSE)
} else {
    message("Not enough methods found rejections at alpha ", ualpha, "; skipping upset plot")
}
```

We also compare the simulation results with and without an informative covariate.

```r
resdfu <- plotsim_standardize(res_u, alpha = seq(0.01, 0.10, 0.01))

resdfiu <- dplyr::full_join(select(resdf, rep, blabel, param.alpha, key, performanceMetric, alpha, value),
                            select(resdfu, rep, blabel, param.alpha, key, performanceMetric, alpha, value),
                            by = c("rep", "blabel", "param.alpha", "key", "performanceMetric", "alpha"),
                            suffix = c(".info", ".uninfo"))
resdfiu <- dplyr::mutate(resdfiu, value = value.info - value.uninfo)

plotsim_average(resdfiu, met = "rejections", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "FDR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "TPR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
plotsim_average(resdfiu, met = "TNR", filter_set = excludeSet, merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
```
```r
sessionInfo()
```