#' Generating function for Poisson Data.
#'
#' \code{genpoissondata()} function mainly used internally by
#' \code{poissontrialsimulator()} function to generate data for a two-arm
#' clinical trial, experimental and control groups. Can be used to generate
#' random trial data.
#'
#' @param sample_size Number of subjects per arm.
#' @param mu1 lambda parameter used in call to \code{rpois()}.
#' Used only in control arm.
#' @param mean_ratio Desired Mean Ratio between experimental and control groups.
#'
#' @return \code{genpoissondata()} returns a data frame with columns: 'id', 'treatment',
#' and 'y'.
#'
#' @examples
#' samplehistdata <- genpoissondata(sample_size=60, mu1=1, mean_ratio=1.0)
#' samplehistdata
#' @export
genpoissondata <- function(sample_size, mu1, mean_ratio) {
  # ------------------------------------------------------------------- #
  # Simulate a balanced two-arm clinical trial with Poisson outcomes.
  # The control arm (treatment = 0) is drawn from Poisson(mu1); the
  # experimental arm (treatment = 1) is drawn from Poisson(mu1 * mean_ratio),
  # where mean_ratio is the experimental-over-control mean ratio.
  # Draw order (control first, then experimental) is fixed so results are
  # reproducible for a given RNG seed.
  # Returns a data frame with columns: id, treatment, y.
  # ------------------------------------------------------------------- #
  # Experimental-arm Poisson mean implied by the requested mean ratio.
  mu2 <- mu1 * mean_ratio
  # Outcome draws for each arm.
  control_y <- stats::rpois(sample_size, lambda = mu1)
  experimental_y <- stats::rpois(sample_size, lambda = mu2)
  # Assemble the trial dataset: ids 1..2n, control rows before experimental.
  data.frame(
    id = seq_len(2 * sample_size),
    treatment = rep(c(0, 1), each = sample_size),
    y = c(control_y, experimental_y)
  )
}
#' Log-likelihood function for two-arm trial with historical data using Poisson
#' distribution.
#'
#' \code{poissonloglike()} function only used internally by
#' \code{poissontrialsimulator()} function to estimate Poisson model parameters
#' when clinical trial involves experimental and control groups as well as historical
#' control data. The Poisson log-likelihood is calculated by modeling \code{data}
#' as a Poisson random variable. Not to be called directly by user.
#'
#' @param params Two element vector of Poisson parameters. The first and second elements
#' are the intercept (beta0) and treatment effect parameter (beta1), where the treatment effect is
#' a log mean ratio (experimental group over control group). The lambda parameter required by
#' dpois() is equal to exp(params[1] + params[2]*treatment). It is assumed that the params[1]
#' parameter is the same in both randomized and historical data. It is assumed that
#' the lambda parameter in the randomized and historical control data is equal to exp(params[1]).
#' @param randdata Dataset of randomly generated trial data. Randomized trial datasets
#' must have 3 columns: id, treatment, and y. The value of treatment must be 0 (control)
#' or 1 (experimental). The values of y must be non negative.
#' @param histdata Dataset of historical data. Historical datasets must have 3 columns: id,
#' treatment, and y. The value of treatment should be 0. The values of y must be
#' non negative.
#' @param a0 Power prior parameter: 0 implies historical data is ignored and 1 implies
#' all information in historical data is used.
#'
#' @return \code{poissonloglike()} returns a value of the loglikelihood function
#' given a set of Poisson parameters, randomly generated trial data, and observed
#' historical data.
#'
#' @examples
#' #None
#' @keywords internal
#' @noRd
poissonloglike <- function(params, randdata, histdata, a0) {
  # ------------------------------------------------------------------- #
  # Negative power-prior log-likelihood for a two-arm Poisson trial that
  # borrows from historical control data.
  #   params[1] (beta0): log mean of controls; the same lambda = exp(beta0)
  #                      is assumed for randomized and historical controls.
  #   params[2] (beta1): log mean ratio (experimental over control).
  # The randomized-arm mean is exp(beta0 + beta1 * treatment).
  # Returns the negated log-likelihood so the result can be minimized
  # by stats::optim().
  # ------------------------------------------------------------------- #
  # Log means for every randomized observation via the log link.
  log_mu_rand <- params[1] + params[2] * randdata$treatment
  # Randomized-data log-likelihood.
  rand_ll <- sum(stats::dpois(randdata$y, lambda = exp(log_mu_rand), log = TRUE))
  # Historical-control log-likelihood; lambda shared with randomized controls.
  hist_ll <- sum(stats::dpois(histdata$y, lambda = exp(params[1]), log = TRUE))
  # The historical contribution is discounted by the user-supplied power
  # prior parameter a0 (0 = ignore history, 1 = full borrowing).
  -(rand_ll + a0 * hist_ll)
}
#' Simulate a single randomized trial using a Poisson outcome and information from
#' historical controls.
#'
#' \code{poissontrialsimulator()} function only used internally by
#' \code{poisson_sim()} function to run a single trial simulation involving historical
#' control data and a Poisson outcome.
#'
#' The simulation of a trial with a Poisson outcome involving historical control data returns
#' an estimate of the mean ratio as well as an estimate of the log mean ratio variance.
#' Finally the simulation returns an indication of whether or not the simulated trial led to
#' a rejection of the null hypothesis (1) or not (0).
#'
#' \code{poissontrialsimulator()} should not be called directly by user.
#'
#' @param sample_size_val Number of subjects per arm.
#' @param histdata Dataset of historical data. Historical datasets must have 3 columns: id,
#' treatment, and y. The value of treatment should be 0. The values of y must be
#' non negative.
#' @param mu1_val Mean parameter value for randomized control arm. Used in call to \code{rpois()}.
#' @param mean_ratio_val Desired mean ratio between randomized experimental and control groups.
#' @param a0_val A power prior parameter ranging from 0 to 1, where 0
#' implies no information from historical data should be used, 1 implies all of
#' the information from historical data should be used. A value between 0 and 1
#' implies that a proportion of the information from historical data will be used.
#' @param alpha A number ranging between 0 and 1 that defines the acceptable Type 1
#' error rate. Default is 0.05.
#'
#' @return \code{poissontrialsimulator()} returns a vector of simulation results. The
#' first element is an estimated mean ratio, the second element is the estimated
#' variance of the log mean ratio, and the third element is a 0/1 variable indicator
#' whether or not the trial rejected the null hypothesis (1) or failed to reject
#' the null hypothesis (0).
#'
#' @examples
#' #None
#' @keywords internal
#' @noRd
poissontrialsimulator <- function(sample_size_val, histdata, mu1_val, mean_ratio_val, a0_val, alpha) {
  # ------------------------------------------------------------------- #
  # Simulate one two-arm Bayesian trial that borrows historical control
  # information through a power prior with weight a0_val.
  # Returns c(mean_ratio_mr, log_mean_ratio_var, reject).
  # ------------------------------------------------------------------- #
  # Draw the randomized trial data for this scenario.
  sampleranddata <- genpoissondata(sample_size = sample_size_val, mu1 = mu1_val,
                                   mean_ratio = mean_ratio_val)
  # A Poisson GLM on the randomized data alone supplies starting values
  # for the optimizer.
  startmod <- stats::glm(y ~ treatment, family = stats::poisson(link = "log"),
                         data = sampleranddata)
  start_vals <- c(startmod$coefficients[1], startmod$coefficients[2])
  # Minimize the negative power-prior log-likelihood; the Hessian at the
  # optimum yields the CLT-based covariance of (beta0, beta1).
  fitmod <- stats::optim(start_vals, poissonloglike, randdata = sampleranddata,
                         histdata = histdata, a0 = a0_val,
                         method = "Nelder-Mead", hessian = TRUE)
  covarmat <- solve(fitmod$hessian)
  # beta1 is the log mean ratio (experimental over control).
  log_mr <- fitmod$par[2]
  half_width <- stats::qnorm(1 - alpha/2) * sqrt(covarmat[2, 2])
  ci_lower <- exp(log_mr - half_width)
  ci_upper <- exp(log_mr + half_width)
  # Reject the null when the two-sided CI for the mean ratio excludes 1.
  reject <- as.numeric(ci_lower > 1 | ci_upper < 1)
  output <- c(exp(log_mr), covarmat[2, 2], reject)
  names(output) <- c("mean_ratio_mr", "log_mean_ratio_var", "reject")
  output
}
#' Simulate a single randomized trial using a Poisson outcome but not including any information from
#' historical controls.
#'
#' \code{poissontrialsimulatornohist()} function only used internally by
#' \code{simple_poisson_sim()} function to estimate Poisson model parameters
#' when clinical trial involves experimental and control groups but no historical control
#' data.
#'
#' The simulation of a trial with a Poisson outcome involving no historical control data returns
#' an estimate of the mean ratio as well as an estimate of the log mean ratio variance.
#' Finally the simulation returns an indication of whether or not the simulated trial led to
#' a rejection of the null hypothesis (1) or not (0).
#'
#' \code{poissontrialsimulatornohist()} should not be called directly by user.
#'
#' @param sample_size_val Number of subjects per arm.
#' @param mu1_val Mean parameter value for randomized control arm. Used in call to \code{rpois()}.
#' @param mean_ratio_val Desired mean ratio between randomized experimental and control groups.
#' @param alpha A number ranging between 0 and 1 that defines the acceptable Type 1
#' error rate. Default is 0.05.
#'
#' @return \code{poissontrialsimulatornohist} returns a vector of simulation results. The
#' first element is an estimated mean ratio, the second element is the estimated
#' variance of the log mean ratio, and the third element is a 0/1 variable indicator
#' whether or not the trial rejected the null hypothesis (1) or failed to reject
#' the null hypothesis (0).
#'
#' @examples
#' #None
#' @keywords internal
#' @noRd
poissontrialsimulatornohist <- function(sample_size_val, mu1_val, mean_ratio_val, alpha) {
  # ------------------------------------------------------------------- #
  # Simulate one two-arm Bayesian trial with no historical borrowing.
  # With flat priors and the canonical log link (where expected and
  # observed information coincide), the GLM fit gives the CLT-based
  # estimates and covariance directly, so no optim() call is needed.
  # Returns c(mean_ratio_mr, log_mean_ratio_var, reject).
  # ------------------------------------------------------------------- #
  # Draw the randomized trial data for this scenario.
  sampleranddata <- genpoissondata(sample_size = sample_size_val, mu1 = mu1_val,
                                   mean_ratio = mean_ratio_val)
  # Fit the two-arm Poisson regression; treatment coefficient is the
  # log mean ratio (experimental over control).
  fit <- stats::glm(y ~ treatment, family = stats::poisson(link = "log"),
                    data = sampleranddata)
  covarmat <- stats::vcov(fit)
  log_mr <- fit$coefficients[2]
  half_width <- stats::qnorm(1 - alpha/2) * sqrt(covarmat[2, 2])
  ci_lower <- exp(log_mr - half_width)
  ci_upper <- exp(log_mr + half_width)
  # Reject the null when the two-sided CI for the mean ratio excludes 1.
  reject <- as.numeric(ci_lower > 1 | ci_upper < 1)
  output <- c(exp(log_mr), covarmat[2, 2], reject)
  names(output) <- c("mean_ratio_mr", "log_mean_ratio_var", "reject")
  output
}
#' Repeated Two Arm Bayesian Clinical Trial Simulation with Historical Data and
#' Poisson Outcome.
#'
#' \code{poisson_sim()} function only used internally by \code{historic_sim()}
#' function to run a set of trial simulations involving historical
#' control data and a Poisson outcome. User defined simulation parameters are
#' used to generate a set of trial scenarios. Each scenario is simulated multiple
#' times and then means are taken to calculate estimates of power, mean ratios,
#' and other user requested study summary statistics like variance of mean
#' ratio, bias (on mean ratio scale), and mse (on mean ratio scale).
#' The number of repeated simulations is defined by the user.
#'
#' \code{poisson_sim()} should not be called directly by user.
#'
#' @param trial_reps Number of trials to replicate within each combination of
#' a0_val, subj_per_arm, effect_vals, and rand_control_diff. As the number
#' of trials increases, the precision of the estimate will increase. Default is
#' 100.
#' @param subj_per_arm A vector of sample sizes, all of which must be positive
#' integers.
#' @param a0_vals A vector of power prior parameters ranging from 0 to 1, where 0
#' implies no information from historical data should be used, 1 implies all of
#' the information from historical data should be used. A value between 0 and 1
#' implies that a proportion of the information from historical data will be used.
#' @param effect_vals A vector of mean ratios (randomized experimental over control),
#' all of which must be positive.
#' @param rand_control_diff For Poisson outcomes this is a vector of mean ratios
#' (randomized controls over historical controls) that represent ratios
#' between randomized and historical controls.
#' @param hist_control_data A dataset of historical data. Default is \code{NULL}.
#' Historical datasets must have 3 columns: id, treatment, and y. The value of
#' treatment should be 0. The values of y must be non negative.
#' @param alpha A number ranging between 0 and 1 that defines the acceptable Type 1
#' error rate. Default is 0.05.
#' @param get_var A TRUE/FALSE indicator of whether an array of variance
#' estimates will be returned. Default is \code{FALSE}.
#' @param get_bias A TRUE/FALSE indicator of whether an array of bias
#' estimates will be returned. Default is \code{FALSE}.
#' @param get_mse A TRUE/FALSE indicator of whether an array of MSE
#' estimates will be returned. Default is \code{FALSE}.
#' @param quietly A TRUE/FALSE indicator of whether notes are printed
#' to output about simulation progress as the simulation runs. If
#' running interactively in RStudio or running in the R console,
#' \code{quietly} can be set to FALSE. If running in a Notebook or
#' knitr document, \code{quietly} needs to be set to TRUE. Otherwise
#' each note will be printed on a separate line and it will take up
#' a lot of output space. Default is \code{TRUE}.
#'
#' @return \code{poisson_sim()} returns an S3 object of class bayes_ctd_array.
#'
#' @examples
#' #None
#' @keywords internal
#' @noRd
poisson_sim <- function(trial_reps, subj_per_arm, a0_vals, effect_vals,
rand_control_diff, hist_control_data, alpha=0.05,
get_var=FALSE, get_bias=FALSE, get_mse=FALSE,
quietly=TRUE) {
# --------------------------------------------------------------- #
# For a set of user specified scenarios (defined by combinations
# of user specified parameters), simulate "trial_reps" trials
# and estimate power, mean ratio estimate, and if requested
# by user: variance of mean ratio, bias, and mse. Using a Poisson
# oucome and incorporating data from historical controls.
# --------------------------------------------------------------- #
# Need to take the historical data and generate distributional parameter estimates
histdata <- hist_control_data
hist_model <- stats::glm(y ~ 1, family = stats::poisson(link = "log"), data = histdata)
initialmu1 <- exp(hist_model$coefficients[1])
# Initialize arrays to hold power, mse, and bias estimate results as requested.
len_val <- length(rand_control_diff) * length(effect_vals) * length(a0_vals) * length(subj_per_arm)
power_results <- array(rep(0, len_val), c(length(subj_per_arm), length(a0_vals), length(effect_vals), length(rand_control_diff)))
est_results <- array(rep(0, len_val), c(length(subj_per_arm), length(a0_vals), length(effect_vals), length(rand_control_diff)))
if (get_mse == TRUE) {
mse_results <- array(rep(0, len_val), c(length(subj_per_arm), length(a0_vals), length(effect_vals), length(rand_control_diff)))
}
if (get_bias == TRUE) {
bias_results <- array(rep(0, len_val), c(length(subj_per_arm), length(a0_vals), length(effect_vals), length(rand_control_diff)))
}
if (get_var == TRUE) {
var_results <- array(rep(0, len_val), c(length(subj_per_arm), length(a0_vals), length(effect_vals), length(rand_control_diff)))
}
# Cycle through the passed values in rand_control_diff, effect_vals, a0_val, and subj_per_arm to generate the
# requested trial characteristics.
for (diffs in 1:length(rand_control_diff)) {
# Need to adjust the randomized control mean given the historical control mean and the mean ratios given in
# rand_control_diff
adjmu1 <- initialmu1 * rand_control_diff[diffs]
for (effvals in 1:length(effect_vals)) {
for (a0vals in 1:length(a0_vals)) {
for (sizes in 1:length(subj_per_arm)) {
if (!quietly){
cat("\r", c(subj_per_arm[sizes], a0_vals[a0vals], effect_vals[effvals], rand_control_diff[diffs]))
}
# For each combination of rand_control_diff, effect_vals, a0_val, and subj_per_arm, simulate the trial
#trial_reps times and then calculate the mean reject rate to estimate power. For bias, work on the
#mean ratio scale and take mean of differences between estimated mean ratios and the true mean
#ratio. For mse, calculate the mean of squared differences between the estimated mean
#ratios and the true mean ratio.
collect <- matrix(rep(0, 3 * trial_reps), ncol = 3)
for (k in 1:trial_reps) {
# sample_size_val will be equal to both arms
collect[k, ] <- poissontrialsimulator(sample_size_val = subj_per_arm[sizes], histdata, mu1_val = adjmu1,
mean_ratio_val = effect_vals[effvals], a0_val = a0_vals[a0vals], alpha = alpha)
}
#collect is a matrix of data, mean ratio in 1st column, log mean ratio variance
# in second column, and a vector of 0/1s in third column indicating whether or
# not trial represented by row led to a rejection of null hypothesis (1) or not (0).
# Note that collect gets rewritten for each scenario.
colnames(collect) <- c("mean_ratio", "log_mean_ratio_var", "reject")
#Start calculating means for each scenarios and placing the means in the proper
# array. Every simulation will contain an array of power results and mean
# ratio estimates.
power_results[sizes, a0vals, effvals, diffs] <- mean(collect[, 3])
est_results[sizes, a0vals, effvals, diffs] <- mean(collect[, 1])
if (get_bias == TRUE) {
bias_results[sizes, a0vals, effvals, diffs] <- mean(collect[, 1] - effect_vals[effvals])
}
if (get_var == TRUE) {
var_results[sizes, a0vals, effvals, diffs] <- mean((collect[, 1]*sqrt(collect[, 2]))^2)
}
if (get_mse == TRUE) {
mse_results[sizes, a0vals, effvals, diffs] <- mean((collect[, 1] - effect_vals[effvals])^2)
}
if (!quietly){
cat("\r", " ")
}
}
}
}
}
cat("\n")
#Lines 397 through 700 simply apply names to the dimensions of array created by the
# simulation depending on values get_bias, get_var, and get_mse.
if (get_bias == FALSE & get_var == FALSE & get_mse == FALSE) {
if (length(subj_per_arm) == 1) {
dimnames(power_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(power_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(power_results)[[2]] <- as.character(a0_vals)
dimnames(power_results)[[3]] <- as.character(effect_vals)
dimnames(power_results)[[4]] <- as.character(rand_control_diff)
if (length(subj_per_arm) == 1) {
dimnames(est_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(est_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(est_results)[[2]] <- as.character(a0_vals)
dimnames(est_results)[[3]] <- as.character(effect_vals)
dimnames(est_results)[[4]] <- as.character(rand_control_diff)
output <- list(power_results, est_results)
names(output) <- c("power", "est")
}
if (get_bias == FALSE & get_var == FALSE & get_mse == TRUE) {
if (length(subj_per_arm) == 1) {
dimnames(power_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(power_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(power_results)[[2]] <- as.character(a0_vals)
dimnames(power_results)[[3]] <- as.character(effect_vals)
dimnames(power_results)[[4]] <- as.character(rand_control_diff)
if (length(subj_per_arm) == 1) {
dimnames(est_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(est_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(est_results)[[2]] <- as.character(a0_vals)
dimnames(est_results)[[3]] <- as.character(effect_vals)
dimnames(est_results)[[4]] <- as.character(rand_control_diff)
if (length(subj_per_arm) == 1) {
dimnames(mse_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(mse_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(mse_results)[[2]] <- as.character(a0_vals)
dimnames(mse_results)[[3]] <- as.character(effect_vals)
dimnames(mse_results)[[4]] <- as.character(rand_control_diff)
output <- list(power_results, est_results, mse_results)
names(output) <- c("power", "est", "mse")
}
if (get_bias == TRUE & get_var == FALSE & get_mse == FALSE) {
if (length(subj_per_arm) == 1) {
dimnames(power_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(power_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(power_results)[[2]] <- as.character(a0_vals)
dimnames(power_results)[[3]] <- as.character(effect_vals)
dimnames(power_results)[[4]] <- as.character(rand_control_diff)
if (length(subj_per_arm) == 1) {
dimnames(est_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(est_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(est_results)[[2]] <- as.character(a0_vals)
dimnames(est_results)[[3]] <- as.character(effect_vals)
dimnames(est_results)[[4]] <- as.character(rand_control_diff)
if (length(subj_per_arm) == 1) {
dimnames(bias_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(bias_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(bias_results)[[2]] <- as.character(a0_vals)
dimnames(bias_results)[[3]] <- as.character(effect_vals)
dimnames(bias_results)[[4]] <- as.character(rand_control_diff)
output <- list(power_results, est_results, bias_results)
names(output) <- c("power", "est", "bias")
}
if (get_bias == TRUE & get_var == FALSE & get_mse == TRUE) {
if (length(subj_per_arm) == 1) {
dimnames(power_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(power_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(power_results)[[2]] <- as.character(a0_vals)
dimnames(power_results)[[3]] <- as.character(effect_vals)
dimnames(power_results)[[4]] <- as.character(rand_control_diff)
if (length(subj_per_arm) == 1) {
dimnames(est_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(est_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(est_results)[[2]] <- as.character(a0_vals)
dimnames(est_results)[[3]] <- as.character(effect_vals)
dimnames(est_results)[[4]] <- as.character(rand_control_diff)
if (length(subj_per_arm) == 1) {
dimnames(bias_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(bias_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(bias_results)[[2]] <- as.character(a0_vals)
dimnames(bias_results)[[3]] <- as.character(effect_vals)
dimnames(bias_results)[[4]] <- as.character(rand_control_diff)
if (length(subj_per_arm) == 1) {
dimnames(mse_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(mse_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(mse_results)[[2]] <- as.character(a0_vals)
dimnames(mse_results)[[3]] <- as.character(effect_vals)
dimnames(mse_results)[[4]] <- as.character(rand_control_diff)
output <- list(power_results, est_results, bias_results, mse_results)
names(output) <- c("power", "est", "bias", "mse")
}
if (get_bias == FALSE & get_var == TRUE & get_mse == FALSE) {
if (length(subj_per_arm) == 1) {
dimnames(power_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(power_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(power_results)[[2]] <- as.character(a0_vals)
dimnames(power_results)[[3]] <- as.character(effect_vals)
dimnames(power_results)[[4]] <- as.character(rand_control_diff)
if (length(subj_per_arm) == 1) {
dimnames(est_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(est_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(est_results)[[2]] <- as.character(a0_vals)
dimnames(est_results)[[3]] <- as.character(effect_vals)
dimnames(est_results)[[4]] <- as.character(rand_control_diff)
if (length(subj_per_arm) == 1) {
dimnames(var_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(var_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(var_results)[[2]] <- as.character(a0_vals)
dimnames(var_results)[[3]] <- as.character(effect_vals)
dimnames(var_results)[[4]] <- as.character(rand_control_diff)
output <- list(power_results, est_results, var_results)
names(output) <- c("power", "est", "var")
}
if (get_bias == FALSE & get_var == TRUE & get_mse == TRUE) {
if (length(subj_per_arm) == 1) {
dimnames(power_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(power_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(power_results)[[2]] <- as.character(a0_vals)
dimnames(power_results)[[3]] <- as.character(effect_vals)
dimnames(power_results)[[4]] <- as.character(rand_control_diff)
if (length(subj_per_arm) == 1) {
dimnames(est_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(est_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(est_results)[[2]] <- as.character(a0_vals)
dimnames(est_results)[[3]] <- as.character(effect_vals)
dimnames(est_results)[[4]] <- as.character(rand_control_diff)
if (length(subj_per_arm) == 1) {
dimnames(var_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(var_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(var_results)[[2]] <- as.character(a0_vals)
dimnames(var_results)[[3]] <- as.character(effect_vals)
dimnames(var_results)[[4]] <- as.character(rand_control_diff)
if (length(subj_per_arm) == 1) {
dimnames(mse_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(mse_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(mse_results)[[2]] <- as.character(a0_vals)
dimnames(mse_results)[[3]] <- as.character(effect_vals)
dimnames(mse_results)[[4]] <- as.character(rand_control_diff)
output <- list(power_results, est_results, var_results, mse_results)
names(output) <- c("power", "est", "var", "mse")
}
if (get_bias == TRUE & get_var == TRUE & get_mse == FALSE) {
if (length(subj_per_arm) == 1) {
dimnames(power_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(power_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(power_results)[[2]] <- as.character(a0_vals)
dimnames(power_results)[[3]] <- as.character(effect_vals)
dimnames(power_results)[[4]] <- as.character(rand_control_diff)
if (length(subj_per_arm) == 1) {
dimnames(est_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(est_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(est_results)[[2]] <- as.character(a0_vals)
dimnames(est_results)[[3]] <- as.character(effect_vals)
dimnames(est_results)[[4]] <- as.character(rand_control_diff)
if (length(subj_per_arm) == 1) {
dimnames(var_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(var_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(var_results)[[2]] <- as.character(a0_vals)
dimnames(var_results)[[3]] <- as.character(effect_vals)
dimnames(var_results)[[4]] <- as.character(rand_control_diff)
if (length(subj_per_arm) == 1) {
dimnames(bias_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(bias_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(bias_results)[[2]] <- as.character(a0_vals)
dimnames(bias_results)[[3]] <- as.character(effect_vals)
dimnames(bias_results)[[4]] <- as.character(rand_control_diff)
output <- list(power_results, est_results, var_results, bias_results)
names(output) <- c("power", "est", "var", "bias")
}
if (get_bias == TRUE & get_var == TRUE & get_mse == TRUE) {
if (length(subj_per_arm) == 1) {
dimnames(power_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(power_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(power_results)[[2]] <- as.character(a0_vals)
dimnames(power_results)[[3]] <- as.character(effect_vals)
dimnames(power_results)[[4]] <- as.character(rand_control_diff)
if (length(subj_per_arm) == 1) {
dimnames(est_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(est_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(est_results)[[2]] <- as.character(a0_vals)
dimnames(est_results)[[3]] <- as.character(effect_vals)
dimnames(est_results)[[4]] <- as.character(rand_control_diff)
if (length(subj_per_arm) == 1) {
dimnames(bias_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(bias_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(bias_results)[[2]] <- as.character(a0_vals)
dimnames(bias_results)[[3]] <- as.character(effect_vals)
dimnames(bias_results)[[4]] <- as.character(rand_control_diff)
if (length(subj_per_arm) == 1) {
dimnames(var_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(var_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(var_results)[[2]] <- as.character(a0_vals)
dimnames(var_results)[[3]] <- as.character(effect_vals)
dimnames(var_results)[[4]] <- as.character(rand_control_diff)
if (length(subj_per_arm) == 1) {
dimnames(mse_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(mse_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(mse_results)[[2]] <- as.character(a0_vals)
dimnames(mse_results)[[3]] <- as.character(effect_vals)
dimnames(mse_results)[[4]] <- as.character(rand_control_diff)
output <- list(power_results, est_results, var_results, bias_results, mse_results)
names(output) <- c("power", "est", "var", "bias", "mse")
}
#Create a list of results and apply the bayes_ctd_array class to the list, then
# return the output object.
class_out <- list(data = output, subj_per_arm = subj_per_arm, a0_vals = a0_vals, effect_vals = effect_vals, rand_control_diff = rand_control_diff, objtype= 'historic')
class(class_out) <- append("bayes_ctd_array", class(class_out))
return(class_out)
}
#' Repeated Two Arm Bayesian Clinical Trial Simulation with no Historical Data and
#' Poisson Outcome.
#'
#' \code{simple_poisson_sim()} function only used internally by \code{simple_sim()}
#' function to run a set of trial simulations involving no historical
#' control data and a Poisson outcome. User defined simulation parameters are
#' used to generate a set of trial scenarios. Each scenario is simulated multiple
#' times and then means are taken to calculate estimates of power, mean ratios,
#' and other user requested study summary statistics like variance of mean
#' ratio, bias (on mean ratio scale), and mse (on mean ratio scale).
#' The number of repeated simulations is defined by the user.
#'
#' \code{simple_poisson_sim()} should not be called directly by user.
#'
#' @param trial_reps Number of trials to replicate within each combination of
#' subj_per_arm and effect_vals. As the number of trials increases, the
#' precision of the estimate will increase. Default is 100.
#' @param subj_per_arm A vector of sample sizes, all of which must be positive
#' integers.
#' @param effect_vals A vector of mean ratios (randomized experimental over control),
#' all of which must be positive.
#' @param mu1_val lambda parameter value for randomized control arm. Used in call to \code{rpois()}.
#' @param alpha A number ranging between 0 and 1 that defines the acceptable Type 1
#' error rate. Default is 0.05.
#' @param get_var A TRUE/FALSE indicator of whether an array of variance
#' estimates will be returned. Default is \code{FALSE}.
#' @param get_bias A TRUE/FALSE indicator of whether an array of bias
#' estimates will be returned. Default is \code{FALSE}.
#' @param get_mse A TRUE/FALSE indicator of whether an array of MSE
#' estimates will be returned. Default is \code{FALSE}.
#' @param quietly A TRUE/FALSE indicator of whether notes are printed
#' to output about simulation progress as the simulation runs. If
#' running interactively in RStudio or running in the R console,
#' \code{quietly} can be set to FALSE. If running in a Notebook or
#' knitr document, \code{quietly} needs to be set to TRUE. Otherwise
#' each note will be printed on a separate line and it will take up
#' a lot of output space. Default is \code{TRUE}.
#'
#' @return \code{simple_poisson_sim()} returns an S3 object of class bayes_ctd_array.
#'
#' @examples
#' #None
#' @keywords internal
#' @noRd
simple_poisson_sim <- function(trial_reps = 100, subj_per_arm, effect_vals, mu1_val,
                               alpha = 0.05, get_var = FALSE, get_bias = FALSE,
                               get_mse = FALSE, quietly = TRUE) {
  # --------------------------------------------------------------- #
  # For a set of user specified scenarios (defined by combinations
  # of user specified parameters), simulate "trial_reps" trials and
  # estimate power, the mean ratio estimate, and, if requested by
  # the user: variance of the mean ratio, bias (on the mean ratio
  # scale), and mse (on the mean ratio scale).  A Poisson outcome
  # is used, and historical control data is not used.
  # --------------------------------------------------------------- #
  # The rand_control_diff and a0_vals dimensions are degenerate for
  # simple (no historical data) simulations: rand_control_diff is
  # fixed at 1 and a0_vals at 0.  They are kept so the result arrays
  # have the same 4-dimensional shape as those produced by the
  # historical-data simulations; all summaries ignore them.
  rand_control_diff <- 1
  a0_vals <- 0

  # Common dimensions of every result array:
  # sample size x a0 x effect x control difference.
  res_dims <- c(length(subj_per_arm), length(a0_vals), length(effect_vals),
                length(rand_control_diff))

  # Helper: label all four dimensions of a result array with the
  # scenario values so users can index results by scenario.  This
  # replaces eight near-identical copy-pasted labeling branches.
  label_dims <- function(arr) {
    dimnames(arr) <- list(as.character(subj_per_arm),
                          as.character(a0_vals),
                          as.character(effect_vals),
                          as.character(rand_control_diff))
    arr
  }

  # Initialize arrays to hold power and estimate results, plus mse,
  # bias, and variance results as requested.
  power_results <- array(0, res_dims)
  est_results <- array(0, res_dims)
  if (get_mse) {
    mse_results <- array(0, res_dims)
  }
  if (get_bias) {
    bias_results <- array(0, res_dims)
  }
  if (get_var) {
    var_results <- array(0, res_dims)
  }

  # Cycle through all combinations of rand_control_diff, effect_vals,
  # a0_vals, and subj_per_arm to generate the requested trial
  # characteristics.  Note that rand_control_diff is fixed at 1 and
  # a0_vals at 0, so the two outer-most of those loops run once.
  for (diffs in seq_along(rand_control_diff)) {
    for (effvals in seq_along(effect_vals)) {
      for (a0vals in seq_along(a0_vals)) {
        for (sizes in seq_along(subj_per_arm)) {
          if (!quietly) {
            # Progress note: overwrite the current console line with
            # the scenario being simulated.
            cat("\r", c(subj_per_arm[sizes], a0_vals[a0vals],
                        effect_vals[effvals], rand_control_diff[diffs]))
          }
          # For each scenario, simulate the trial trial_reps times.
          # Each row of collect holds one simulated trial: estimated
          # mean ratio (col 1), variance of the log mean ratio
          # (col 2), and a 0/1 indicator of whether the trial led to
          # rejection of the null hypothesis (col 3).  collect is
          # rewritten for each scenario.
          collect <- matrix(0, nrow = trial_reps, ncol = 3)
          for (k in seq_len(trial_reps)) {
            # sample_size_val applies equally to both arms.
            collect[k, ] <- poissontrialsimulatornohist(
              sample_size_val = subj_per_arm[sizes],
              mu1_val = mu1_val,
              mean_ratio_val = effect_vals[effvals],
              alpha = alpha)
          }
          colnames(collect) <- c("mean_ratio", "log_mean_ratio_var", "reject")
          # Power: proportion of simulated trials that rejected the
          # null.  Estimate: mean of the estimated mean ratios.
          power_results[sizes, a0vals, effvals, diffs] <- mean(collect[, 3])
          est_results[sizes, a0vals, effvals, diffs] <- mean(collect[, 1])
          if (get_bias) {
            # Bias on the mean ratio scale: mean difference between
            # estimated and true mean ratios.
            bias_results[sizes, a0vals, effvals, diffs] <-
              mean(collect[, 1] - effect_vals[effvals])
          }
          if (get_var) {
            # Mean ratio variance recovered from the log-scale
            # variance: (ratio * sqrt(log-variance))^2, averaged
            # over trials.
            var_results[sizes, a0vals, effvals, diffs] <-
              mean((collect[, 1] * sqrt(collect[, 2]))^2)
          }
          if (get_mse) {
            # MSE on the mean ratio scale: mean squared difference
            # between estimated and true mean ratios.
            mse_results[sizes, a0vals, effvals, diffs] <-
              mean((collect[, 1] - effect_vals[effvals])^2)
          }
          if (!quietly) {
            # Clear the progress line.
            cat("\r", " ")
          }
        }
      }
    }
  }
  if (!quietly) {
    # Terminate the progress line.  Guarded by quietly so that no
    # stray newline is emitted in Notebook/knitr output (fixes a bug
    # where this printed unconditionally).
    cat("\n")
  }

  # Label each requested result array and assemble the output list.
  # Element order matches the original branch logic exactly:
  # power, est, then var, bias, mse in that order, as requested.
  output <- list(power = label_dims(power_results),
                 est = label_dims(est_results))
  if (get_var) {
    output$var <- label_dims(var_results)
  }
  if (get_bias) {
    output$bias <- label_dims(bias_results)
  }
  if (get_mse) {
    output$mse <- label_dims(mse_results)
  }

  # Wrap the results together with the scenario definitions, tag the
  # object as a 'simple' (no historical data) simulation, and apply
  # the bayes_ctd_array class so the package's S3 methods dispatch
  # on it, then return the output object.
  class_out <- list(data = output, subj_per_arm = subj_per_arm, a0_vals = 0,
                    effect_vals = effect_vals, rand_control_diff = 1,
                    objtype = 'simple')
  class(class_out) <- append("bayes_ctd_array", class(class_out))
  return(class_out)
}
# NOTE(review): removed stray web-page text ("Add the following code to your
# website..." / "Embedding Snippets") that was accidentally captured into this
# source file; it was not valid R code and would break parsing.