```r
knitr::opts_chunk$set(echo = TRUE)
```
We consider fixed and group sequential design under non-proportional hazards when testing with the logrank test. We focus primarily on the average hazard ratio approach, expanding the asymptotic approach of @Mukhopadhyay2020 to both group sequential design and more complex enrollment assumptions. The theoretical background for this is provided in other vignettes in this package. We provide a few basic examples along the lines of @NPHWG2020sim to illustrate design considerations under four scenarios: proportional hazards, a shorter delayed effect, a longer delayed effect, and crossing survival curves.

Illustrations include expected event accumulation over time, average hazard ratio by study duration, fixed design sample size by scenario, and group sequential bounds and power under each scenario.
We focus on results rather than code, but hidden code can be revealed for all examples.
The primary packages needed are gsdmvn and gsDesign2, which will likely be combined into gsDesign2 in the near future. The other packages used play a supporting role.
```r
library(gsDesign)
library(ggplot2)
library(dplyr)
library(gt)
library(tidyr)
library(tibble)
devtools::load_all() # load the development version of the package
```
Expected enrollment duration is 18 months, with piecewise constant enrollment rates escalating every 2 months until month 6, when enrollment is assumed to have reached steady state. We also set up a similar ramp-up pattern with a 24 month expected enrollment duration, although the 18 month pattern is used for all scenarios below.
```r
# Set the enrollment table for 24 month expected enrollment
enroll24 <- tibble(
  Stratum = rep("All", 4),     # unstratified
  duration = c(rep(2, 3), 18), # 6 month ramp-up of enrollment, 24 months enrollment time target
  rate = 1:4                   # ratio of the enrollment rates
)
# Adjust enrollment rates to enroll 100 subjects
enroll24$rate <- enroll24$rate * 100 / sum(enroll24$duration * enroll24$rate)

# Set the enrollment table for 18 month expected enrollment
enroll18 <- tibble(
  Stratum = rep("All", 4),     # unstratified
  duration = c(rep(2, 3), 12), # 6 month ramp-up of enrollment, 18 months enrollment time target
  rate = 1:4                   # ratio of the enrollment rates
)
# Adjust enrollment rates to enroll 100 subjects
enroll18$rate <- enroll18$rate * 100 / sum(enroll18$duration * enroll18$rate)

# Put these in a single tibble by scenario
# We use the 18 month enrollment pattern for all 4 scenarios
enrollRates <- rbind(
  enroll18 %>% mutate(Scenario = "PH"),
  enroll18 %>% mutate(Scenario = "Shorter delayed effect"),
  enroll18 %>% mutate(Scenario = "Longer delayed effect"),
  enroll18 %>% mutate(Scenario = "Crossing")
)
```
We will consider the following failure rate assumptions:

- Control group: exponential with a median of 14 months under the PH scenario and 12 months under the other scenarios.
- PH: hazard ratio of 0.7 at all times.
- Shorter delayed effect: hazard ratio of 1 for 4 months, 0.6 thereafter.
- Longer delayed effect: hazard ratio of 1 for 6 months, 0.6 thereafter.
- Crossing: hazard ratio of 1.5 for 4 months, 0.5 thereafter.
- Dropout rate of 0.001 per month in both arms.
```r
# Set up the 4 scenarios
control_median <- c(14, 12, 12, 12)
Month <- c(0, 4, 6, 44)
duration <- Month - c(0, Month[1:3])
# HR by time period for each scenario
hr <- c(
  rep(.7, 4),      # PH: constant hazard ratio of 0.7
  1, 1, .6, .6,    # Shorter delayed effect: HR = 1 for 4 months, then HR = 0.6
  1, 1, 1, .6,     # Longer delayed effect: HR = 1 for 6 months, then HR = 0.6
  1.5, 1.5, .5, .5 # Crossing: HR = 1.5 for 4 months, then HR = 0.5
)
```
The survival curves for these 4 scenarios are shown below:
```r
# Put parameters together in a tibble
s <- tibble(
  Scenario = c(
    rep("PH", 4), rep("Shorter delayed effect", 4),
    rep("Longer delayed effect", 4), rep("Crossing", 4)
  ),
  Treatment = rep("Control", 16),
  Month = rep(Month, 4), # periods for constant HR
  duration = rep(duration, 4),
  rate = log(2) / rep(control_median, each = 4),
  hr = hr
)
# Calculate survival at each change point for each scenario
s <- rbind(
  s,
  s %>% mutate(Treatment = "Experimental", rate = rate * hr)
) %>%
  group_by(Scenario, Treatment) %>%
  mutate(Survival = exp(-cumsum(duration * rate)))
```
```r
# Plot the survival curves
ggplot(s, aes(x = Month, y = Survival, col = Scenario, lty = Treatment, shape = Treatment)) +
  geom_line() +
  annotate(
    "text", x = 18, y = .1,
    label = "Controls for scenarios other than PH have the same survival"
  ) +
  scale_y_log10(breaks = (1:10) / 10, lim = c(.07, 1)) +
  scale_x_continuous(breaks = seq(0, 42, 6)) +
  ggtitle("Survival over time for the 4 scenarios studied")
```
The average hazard ratios for these 4 scenarios are shown below. We note that under the Shorter delayed effect scenario, the average hazard ratio approaches that of the PH scenario after a study duration of about 36 months.
```r
# Durations to be used in common for all failure rate scenarios
dur <- Month[2:4] - Month[1:3]
# Set the failure rate table, using piecewise exponential failure
# with the control medians defined above
failRates <- rbind(
  tibble(Scenario = "PH", Stratum = "All", duration = dur,
         failRate = log(2) / control_median[1], hr = hr[1], dropoutRate = .001),
  tibble(Scenario = "Shorter delayed effect", Stratum = "All", duration = dur,
         failRate = log(2) / control_median[2], hr = hr[6:8], dropoutRate = .001),
  tibble(Scenario = "Longer delayed effect", Stratum = "All", duration = dur,
         failRate = log(2) / control_median[3], hr = hr[10:12], dropoutRate = .001),
  tibble(Scenario = "Crossing", Stratum = "All", duration = dur,
         failRate = log(2) / control_median[4], hr = hr[14:16], dropoutRate = .001)
)
# Compute average hazard ratio (AHR) over time for each scenario
# (named ahr so the hr vector above is not overwritten)
ahr <- do.call(
  rbind,
  lapply(
    c("PH", "Shorter delayed effect", "Longer delayed effect", "Crossing"),
    function(x) {
      AHR(enrollRates = enrollRates %>% filter(Scenario == x),
          failRates = failRates %>% filter(Scenario == x),
          totalDuration = c(.001, seq(4, 44, 4))) %>%
        mutate(Scenario = x)
    }
  )
)
```
```r
ggplot(ahr, aes(x = Time, y = AHR, col = Scenario)) +
  geom_line() +
  scale_x_continuous(breaks = seq(0, 42, 6)) +
  ggtitle("Average hazard ratio (AHR) by study duration",
          subtitle = "Under the 4 scenarios examined")
```
The expected number of events for these 4 scenarios is shown below. Under the 3 NPH scenarios, events accumulate faster than under the PH scenario due to a lower control median, a delayed treatment effect, or both.
```r
ggplot(ahr, aes(x = Time, y = Events, col = Scenario)) +
  geom_line() +
  scale_x_continuous(breaks = seq(0, 42, 6)) +
  ylab("Expected events per 100 enrolled") +
  ggtitle("Expected event accumulation under the 4 scenarios studied")
```
From the above, we see that slight variations in control failure rates and the potential for a delayed effect can substantially accelerate the accumulation of events. If analysis timing is based on a targeted event count, these variations can lead to earlier analyses than anticipated, at which the more favorable average hazard ratio expected with longer follow-up has not yet been achieved. We examine the implications further below.
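To quantify this, we can interpolate the expected event curves in the `ahr` table computed above to approximate when a fixed event target would be reached under each scenario. This is a minimal sketch; the target of 70 events per 100 enrolled is an arbitrary illustrative choice.

```r
# Sketch: approximate calendar time at which an illustrative target of
# 70 events per 100 enrolled would be expected under each scenario.
# Reuses the `ahr` tibble computed above; the target is arbitrary.
target_events <- 70
ahr %>%
  group_by(Scenario) %>%
  summarize(
    # linear interpolation of expected events over the time grid
    month_reached = approx(x = Events, y = Time, xout = target_events)$y
  )
```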
We power a fixed design at 90% with 2.5% one-sided Type I error under the different scenarios under consideration, now assuming the 18 month enrollment pattern for all scenarios. For the PH and Shorter delayed effect scenarios, a 36 month study requires a similar AHR, number of events, and sample size. The other two scenarios, with crossing survival curves or a longer effect delay, would require substantially larger sample sizes since a similar AHR is not achieved by month 36.
```r
ss_ahr_fixed <- do.call(
  rbind,
  lapply(
    c("PH", "Shorter delayed effect", "Longer delayed effect", "Crossing"),
    function(x) {
      xx <- gs_design_ahr(
        enrollRates = enrollRates %>% filter(Scenario == x),
        failRates = failRates %>% filter(Scenario == x),
        analysisTimes = 36,
        upper = gs_b, upar = qnorm(.975),
        lower = gs_b, lpar = -Inf,
        alpha = .025, beta = .1
      )
      xx$analysis %>%
        select(Time, N, Events, AHR) %>%
        mutate(Scenario = x)
    }
  )
)
ss_ahr_fixed %>%
  gt() %>%
  fmt_number(columns = 1:3, decimals = 0) %>%
  fmt_number(columns = 4, decimals = 3) %>%
  tab_header(
    title = "Sample Size and Events Required by Scenario",
    subtitle = "36 Month Trial Duration, 2.5% One-sided Type 1 Error, 90% Power"
  )
```
Assuming the shorter delayed effect is the primary scenario for which we wish to protect power, how long should the trial be to optimize the tradeoff among sample size, AHR, and events required? We inform this tradeoff by sizing the trial for different assumed trial durations with the same failure rates and relative enrollment rates. The required event count is perhaps the most interesting result here: a 24 month trial requires almost twice the events of a 42 month trial to be powered at 90%. For further study, we will consider the 36 month trial duration as a reasonable tradeoff between time, sample size, and power under a presumed delayed effect of 4 months followed by a hazard ratio of 0.6 thereafter.
```r
do.call(
  rbind,
  lapply(c(24, 30, 36, 42), function(x) {
    gs_design_ahr(
      enrollRates = enrollRates %>% filter(Scenario == "Shorter delayed effect"),
      failRates = failRates %>% filter(Scenario == "Shorter delayed effect"),
      analysisTimes = x,
      upper = gs_b, upar = qnorm(.975),
      lower = gs_b, lpar = -Inf,
      alpha = .025, beta = .1
    )$analysis %>%
      select(Time, N, Events, AHR) %>%
      mutate(Scenario = "Shorter delayed effect")
  })
) %>%
  gt() %>%
  fmt_number(columns = 1:3, decimals = 0) %>%
  fmt_number(columns = 4, decimals = 3) %>%
  tab_header(
    title = "Sample Size and Events Required by Trial Duration",
    subtitle = "Delayed Effect of 4 Months, HR = 0.6 Thereafter; 90% Power"
  )
```
Under each of the scenarios, we can examine the expected number of events accruing in the time periods of interest.
```r
events_by_time_period <- NULL
for (g in c("PH", "Shorter delayed effect", "Longer delayed effect", "Crossing")) {
  events_by_time_period <- rbind(
    events_by_time_period,
    AHR(enrollRates = enrollRates %>% filter(Scenario == g),
        failRates = failRates %>% filter(Scenario == g),
        totalDuration = c(12, 20, 28, 36),
        simple = FALSE) %>%
      mutate(Scenario = g)
  )
}
```
Recall that our alternate hypothesis assumes no treatment effect (HR = 1) for 4 months and HR = 0.6 thereafter. For any of the above scenarios, if we wish to base a futility bound on this assumption plus the expected number of events before and after month 4, we can compute the average hazard ratio under the alternate hypothesis for each scenario at each planned analysis time as follows. You can see that an interim futility spending bound based on the alternate hypothesis can depend fairly heavily on enrollment and the control failure rate. Note also that at the time of an interim analysis, the alternate hypothesis AHR can be computed in this same fashion from the observed events by time period. This can be quite different from the scenario HR; e.g., under PH we assume HR = 0.7 throughout, but for the futility bound comparison we compute a blinded AHR that decreases with each analysis under the alternate hypothesis.
```r
# Time periods for each scenario were 0-4, 4-6, and 6+
# Thus H1 has HR as follows
hr1 <- tibble(t = c(0, 4, 6), hr1 = c(1, .6, .6))
ahr_by_analysis <- events_by_time_period %>%
  full_join(hr1) %>%
  group_by(Scenario, Time) %>%
  summarize(AHR1 = exp(sum(Events * log(hr1)) / sum(Events)))
ahr_by_analysis %>%
  pivot_wider(names_from = Scenario, values_from = AHR1) %>%
  gt() %>%
  fmt_number(columns = 2:5, decimals = 3)
```
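To make the event-weighted geometric mean computed above concrete, here is a hypothetical worked example; the event counts are made up for illustration and are not taken from the design.

```r
# Hypothetical illustration: suppose an interim analysis observes
# 10 events in months 0-4 (HR = 1 under H1) and 30 events after
# month 4 (HR = 0.6 under H1); the blinded AHR under H1 is then
events <- c(10, 30)
hr_h1 <- c(1, .6)
exp(sum(events * log(hr_h1)) / sum(events))
#> [1] 0.6817 (approximately)
```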
Here we assume the design is based on a delayed effect model where the delay is not too long and the long-term average hazard ratio benefit is strong, and then examine power under the alternate scenarios. We will plan a 36 month group sequential design under the Shorter delayed effect scenario. Interim analyses are planned after 12, 20, and 28 months.
```r
analysisTimes <- c(12, 20, 28, 36)
# Efficacy bound: Lan-DeMets O'Brien-Fleming spending, total alpha = 0.025
upar <- list(sf = gsDesign::sfLDOF, total_spend = 0.025, param = NULL, timing = NULL, theta = 0)
# Futility bound: Hwang-Shih-DeCani spending, total beta spend = 0.1
lpar <- list(sf = gsDesign::sfHSD, total_spend = .1, param = -2, timing = NULL, theta = NULL)
NPHasymmetric <- gs_design_ahr(
  enrollRates = enrollRates %>% filter(Scenario == "Shorter delayed effect"),
  failRates = failRates %>% filter(Scenario == "Shorter delayed effect"),
  ratio = 1, alpha = .025, beta = 0.1,
  # Information fraction not required (but available!)
  analysisTimes = analysisTimes,
  # Functions to enable spending bounds
  upper = gs_spending_bound,
  lower = gs_spending_bound,
  # Spending functions and parameters used
  upar = upar,
  lpar = lpar
)
summary(NPHasymmetric) %>% as_gt()
```
For each scenario, we now compute the adjusted expected futility bounds and the implied power.
```r
do.call(
  rbind,
  lapply(
    c("PH", "Shorter delayed effect", "Longer delayed effect", "Crossing"),
    function(x) {
      # Alternate hypothesis AHR by analysis for this scenario
      AHR1 <- (ahr_by_analysis %>% filter(Scenario == x))$AHR1
      lparx <- lpar
      lparx$theta1 <- -log(AHR1)
      gs_power_ahr(
        enrollRates = enrollRates %>% filter(Scenario == x),
        failRates = failRates %>% filter(Scenario == x),
        events = NULL,
        analysisTimes = c(12, 20, 28, 36),
        upper = gs_spending_bound, upar = upar,
        lower = gs_spending_bound, lpar = lparx
      )$analysis %>%
        mutate(Scenario = x)
    }
  )
) %>%
  gt() %>%
  fmt_number(columns = "Events", decimals = 1) %>%
  fmt_number(columns = 5:10, decimals = 4)
```
We investigate two weighting schemes for the weighted logrank method. Both use Fleming-Harrington weights with rho = 0 and gamma = 0.5; the first truncates the weights at tau = 4 months, while the second applies no truncation.
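To see what these weights do, recall that the Fleming-Harrington weight at time t is S(t)^rho * (1 - S(t))^gamma, where S is the pooled survival; as we understand `wlr_weight_fh`, a positive tau evaluates the pooled survival at min(t, tau), so the weight is capped at its month-4 value. Below is a minimal sketch of the two weight functions, assuming, for illustration only, an exponential pooled survival with median 12 months (the design functions compute weights from the model-based pooled survival).

```r
# Sketch of the two weight functions under an assumed exponential
# pooled survival with median 12 months (illustrative assumption)
t <- seq(0, 44, .25)
S_no_tau <- exp(-log(2) / 12 * t)        # S(t)
S_tau4 <- exp(-log(2) / 12 * pmin(t, 4)) # S(min(t, 4)): tau = 4 truncation
weights <- tibble(
  Month = rep(t, 2),
  Weight = c((1 - S_tau4)^0.5, (1 - S_no_tau)^0.5), # FH(rho = 0, gamma = 0.5)
  Scheme = rep(c("FH(0, 0.5), tau = 4", "FH(0, 0.5), no truncation"),
               each = length(t))
)
ggplot(weights, aes(x = Month, y = Weight, col = Scheme)) +
  geom_line() +
  ggtitle("Fleming-Harrington weights with and without truncation at 4 months")
```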
The fixed designs under the first weighting scheme for the four scenarios are summarized as follows.
```r
do.call(
  rbind,
  lapply(
    c("PH", "Shorter delayed effect", "Longer delayed effect", "Crossing"),
    function(x) {
      gs_design_wlr(
        enrollRates = enrollRates %>% filter(Scenario == x),
        failRates = failRates %>% filter(Scenario == x),
        weight = function(x, arm0, arm1) {
          wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0.5, tau = 4)
        },
        alpha = .025, beta = .1,
        upar = qnorm(.975), lpar = -Inf,
        analysisTimes = 44
      )$analysis %>%
        mutate(Scenario = x)
    }
  )
) %>%
  gt() %>%
  fmt_number(columns = 3:6, decimals = 4)
```
The fixed designs under the second weighting scheme for the four scenarios are summarized as follows.
```r
# tau is omitted (equivalently, tau = -1), so the weights are not truncated
do.call(
  rbind,
  lapply(
    c("PH", "Shorter delayed effect", "Longer delayed effect", "Crossing"),
    function(x) {
      gs_design_wlr(
        enrollRates = enrollRates %>% filter(Scenario == x),
        failRates = failRates %>% filter(Scenario == x),
        weight = function(x, arm0, arm1) {
          wlr_weight_fh(x, arm0, arm1, rho = 0, gamma = 0.5)
        },
        alpha = .025, beta = .1,
        upar = qnorm(.975), lpar = -Inf,
        analysisTimes = 44
      )$analysis %>%
        mutate(Scenario = x)
    }
  )
) %>%
  gt() %>%
  fmt_number(columns = 3:6, decimals = 4)
```