# (stray extraction artifact — not part of the vignette code)
## ---- include = FALSE---------------------------------------------------------
# Clear out any parameter files left behind by a previous run of this vignette.
unlink("param_files", recursive = TRUE)

# Attach the packages this vignette relies on, silencing startup chatter.
for (.pkg in c("dplyr", "fs", "rfacts")) {
  suppressPackageStartupMessages(library(.pkg, character.only = TRUE))
}

# Show code in the rendered vignette but do not evaluate it
# (the chunks below require FACTS and an HPC scheduler).
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>",
  eval = FALSE
)
## ---- output = FALSE----------------------------------------------------------
# library(rfacts)
# facts_file <- get_facts_file_example("dichot.facts") # could be any FACTS file
# # On traditional HPC clusters, this should be a shared directory
# # instead of a temp directory:
# tmp <- fs::dir_create(tempfile())
# all_param_files <- file.path(tmp, "param_files")
#
# # Set n_weeks_files to 0 so we only read the weeks files generated by
# # trial execution mode.
# run_flfll(facts_file, all_param_files, n_weeks_files = 0L)
## -----------------------------------------------------------------------------
# param_files <- get_param_dirs(all_param_files)[1]
# basename(param_files)
## -----------------------------------------------------------------------------
# run_once <- function(index, param_files) {
# out <- tempfile()
# dir_copy(param_files, out) # Requires the fs package.
# run_engine_dichot(out, n_sims = 1L)
# pats <- read_patients(out) # Read and aggregate all the patients files.
# # Here, do some custom data processing on the whole pats data frame...
# # Write the processed patient data to the original patients files.
# overwrite_csv_files(pats)
# run_engine_dichot(
# out,
# n_sims = 1L,
# seed = index,
# mode = "r",
# execdata = "patients00001.csv", # Custom / modified patients files.
# final = TRUE
# )
# read_weeks(out)
# }
## -----------------------------------------------------------------------------
# library(dplyr)
# library(fs)
#
# # Ignore the facts_sim column since all weeks files were indexed 00000.
# # For data post-processing, use the facts_id column instead.
# lapply(seq_len(2), run_once, param_files = param_files) %>%
# bind_rows()
## -----------------------------------------------------------------------------
# # Configure clustermq to use our grid and your template file.
# # If you are using a scheduler like SGE, you need to write a template file
# # like clustermq.tmpl. To learn how, visit
# # https://mschubert.github.io/clustermq/articles/userguide.html#configuration-1
# options(clustermq.scheduler = "sge", clustermq.template = "clustermq.tmpl")
## -----------------------------------------------------------------------------
# library(clustermq)
# weeks <- Q(
# fun = run_once,
# iter = seq_len(1e3), # Run 1000 simulations.
# const = list(param_files = param_files),
# pkgs = c("fs", "rfacts"),
# n_jobs = 1e2 # Use 100 clustermq workers.
# ) %>%
# bind_rows()
# (website embedding boilerplate from the documentation host — not vignette code)
# Any scripts or data that you put into this service are public.
# Add the following code to your website.
# For more information on customizing the embed code, read Embedding Snippets.