inst/ubinc/scripts/ubiquity_fcns.R

#'@import cli     
#'@import deSolve
#'@import doParallel
#'@import foreach
#'@import ggplot2
#'@import knitr
#'@import optimx
#'@import onbrand
#'@import pso
#'@import rmarkdown
#'@import rhandsontable
#'@import stringr
#'@importFrom digest digest
#'@importFrom dplyr  all_of select
#'@importFrom flextable add_header add_footer align as_chunk as_paragraph autofit body_add_flextable delete_part merge_h 
#'@importFrom flextable regulartable set_header_labels theme_alafoli theme_box theme_tron_legacy 
#'@importFrom flextable theme_vanilla theme_booktabs theme_tron theme_vader theme_zebra
#'@importFrom parallel stopCluster makeCluster
#'@importFrom readxl read_xls read_xlsx
#'@importFrom magrittr "%>%"
#'@importFrom PKNCA PKNCA.options PKNCAconc PKNCAdose PKNCAdata pk.nca get.interval.cols
#'@importFrom utils capture.output read.csv read.delim txtProgressBar setTxtProgressBar write.csv tail packageVersion sessionInfo
#'@importFrom stats median qt var sd
#'@importFrom scales trans_format  math_format squish_infinite
#'@importFrom MASS mvrnorm


# These were either pulled out because they are used in the shiny app or
# because they were used in reporting and are now only used in onbrand
#   #'@import rstudioapi
#   #'@importFrom grid pushViewport viewport grid.newpage grid.layout
#   #'@importFrom gridExtra grid.arrange
#   #'@importFrom officer add_slide annotate_base body_add_break body_add_fpar body_add_par body_add_gg body_add_img 
#   #'@importFrom officer body_add_table body_add_toc body_bookmark body_end_section_continuous 
#   #'@importFrom officer body_end_section_landscape body_end_section_portrait body_replace_all_text external_img 
#   #'@importFrom officer footers_replace_all_text headers_replace_all_text layout_properties layout_summary ph_location_type 
#   #'@importFrom officer ph_location_label ph_with read_pptx read_docx shortcuts  slip_in_seqfield slip_in_text 
#   #'@importFrom officer styles_info unordered_list



#'@export
#'@title Build the System File
#'@description  Builds the specified system file, creating the targets for R and other languages as well as the templates for performing simulations and estimations. 
#'
#'@param system_file name of the file defining the system in the \href{https://ubiquity.tools}{ubiquity} format (default = 'system.txt'), if the file does not exist a template will be created and compiled.
#'@param distribution indicates whether you are using a \code{'package'} or a \code{'stand alone'}
#' distribution of ubiquity. If set to \code{'automatic'} the build script will first 
#' look to see if the ubiquity R package is installed. If it is installed it
#' will use the package. Otherwise, it will assume a \code{"stand alone"} distribution.
#'@param perlcmd system command to run perl ("perl")
#'@param output_directory location to store analysis outputs (\code{file.path(".", "output")})
#'@param temporary_directory location to store templates and other files generated when building the system (\code{file.path(".", "transient")})
#'@param verbose enable verbose messaging   (\code{TRUE})
#'@param ubiquity_app set to \code{TRUE} when building the system to be used with the ubiquity App (\code{FALSE})
#'@param debug Boolean variable indicating if debugging information should be displayed (\code{TRUE})
#'@return initialized ubiquity system object
#'@examples
#' \donttest{
#' fr = system_new(file_name        = "system.txt", 
#'                 system_file      = "mab_pk", 
#'                 overwrite        = TRUE, 
#'                 output_directory = tempdir())
#' cfg = build_system(system_file  = file.path(tempdir(), "system.txt"),
#'       output_directory          = file.path(tempdir(), "output"),
#'       temporary_directory       = tempdir())
#'}
build_system <- function(system_file          = "system.txt",
                         distribution         = "automatic",
                         perlcmd              = "perl",
                         output_directory     = file.path(".", "output"),
                         temporary_directory  = file.path(".", "transient"),
                         verbose              = TRUE,
                         ubiquity_app         = FALSE,
                         debug                = TRUE){

# If we cannot find a system file we create an empty one 
if(!file.exists(system_file)){
  cli::cli_alert_warning(paste("Unable to find system file >",system_file, "<", sep=""))
  cli::cli_alert_warning("Creating an empty template")
  sys_new_res = system_new(system_file="template", file_name=system_file)
}

 
# model base file used for the c library
system_file_full = normalizePath(system_file, winslash = "/")
if(ubiquity_app){
  system_checksum = "app_base"
} else {
  system_checksum = as.character(digest::digest(system_file_full, algo=c("md5")))
}

c_libfile_base    =  paste("ubiquity_", system_checksum, sep="")
c_libfile_base_c  =  paste("ubiquity_", system_checksum, ".c", sep="")
c_libfile_base_o  =  paste("ubiquity_", system_checksum, ".o", sep="")

temporary_directory = normalizePath(temporary_directory, winslash="/")
temp_directory  = file.path(temporary_directory, system_checksum)

# if the temporary directory does not exist we create it
if(!dir.exists(temp_directory)){
  dir.create(temp_directory, recursive=TRUE)
}

# If the distribution is set to automatic we first check to see if the
# ubiquity package is loaded; if not, we look for the stand alone library
# file and source it
if(distribution == "automatic"){
  if("ubiquity" %in% (.packages())){
    distribution = "package"
  } else if(file.exists(file.path('library', 'r_general', 'ubiquity.R'))){
    source(file.path('library', 'r_general', 'ubiquity.R'))
    distribution = "stand alone"
  } 
} else if(distribution == "package"){
  # If it's set to package we make sure the package is installed and
  # if it's not we default to stand alone
  if(system.file(package="ubiquity") == ""){
    cli::cli_alert_warning("Warning: package selected but not found")
    distribution = "stand alone" }
}


# Checking for perl
if(as.character(Sys.which(perlcmd )) == ""){
  stop("No perl interpreter found")
}


pkgs = c("deSolve", "ggplot2", "readxl", "cli")
invisible(system_req(pkgs))

# For stand alone distributions we use the templates from the local library
# directory and the build script in the working directory
if(distribution == "stand alone"){
  templates       = file.path(getwd(), "library", "templates")
  build_script_pl = "build_system.pl"
}

# For the package we pull the package install location to point to files
# needed to build the system
if(distribution == "package"){
  package_dir     = system.file("", package="ubiquity")
  templates       = file.path(package_dir, "ubinc", "templates")
  build_script_pl = file.path(package_dir, "ubinc", "perl",  "build_system.pl")
}


cfg = list()


if(file.exists(system_file)){
  if(verbose == TRUE){
    cli::cli_h1(paste("Building the system: ", system_file, sep=""))
    cli::cli_alert(c("ubiquity:     ", cli::col_blue(style_underline(style_bold("https://r.ubiquity.tools")))))
    if(distribution == "package"){
      cli::cli_alert(c("Distribution: ",  cli::col_blue(style_underline(paste0(distribution, " (", packageVersion("ubiquity"), ")", sep="")))))
    } else {
      cli::cli_alert(c("Distribution: ",  cli::col_blue(style_underline(paste0(distribution)))))
    }
  }

  
  build_command = sprintf('%s "%s" "%s" "%s" "%s" "%s" "%s"', 
                          perlcmd, build_script_pl, system_file_full, 
                          temp_directory, templates, distribution, c_libfile_base)
  output = system(build_command, intern=TRUE)
  
  # CFILE is used to indicate if we have compiled and loaded the C file successfully.
  # We default to TRUE and set it to FALSE below if any problems are encountered.
  CFILE = TRUE
  
  if(length(output) > 0){
    cli::cli_alert_warning("Build reported errors and")
    cli::cli_alert_warning("may have failed, see below:")
    for(line in output){
      cli::cli_alert_warning(line)
    }
    rm('line')
  }
  
  #
  # Cleaning up any older versions of the C file
  #
  # if it's loaded we remove it from memory:
  if((c_libfile_base %in% names(getLoadedDLLs()))){
    dyn.unload(getLoadedDLLs()[[c_libfile_base]][["path"]])
    }
  
  # making the output directory to store generated information
  if(!file.exists(output_directory)){
    if(verbose == TRUE){
      cli::cli_alert("Creating output directory")
    }
    dir.create(output_directory, recursive=TRUE)
  }
  
  #next we remove any files to make sure we start from scratch
  if(file.exists(file.path(temp_directory, paste(c_libfile_base, .Platform$dynlib.ext, sep = "")))){
     file.remove(file.path(temp_directory, paste(c_libfile_base, .Platform$dynlib.ext, sep = ""))) }
  if(file.exists(file.path(temp_directory, c_libfile_base_o))){
     file.remove(file.path(temp_directory, c_libfile_base_o)) }
  
  # Now we compile the C file
  if(verbose == TRUE){
    cli::cli_alert("Compiling C version of system")
  }
  # Command used to compile C version of the model:
  #compile_cmd =  paste(file.path(R.home("bin"), "R"), ' CMD SHLIB "', file.path(temp_directory, c_libfile_base_c), '"', sep="")
  compile_cmd =  paste(file.path(R.home("bin"), "R"), ' CMD SHLIB "', c_libfile_base_c, '"', sep="")

  if(file.exists(file.path(temp_directory, 'r_ode_model.c'))){
    # Copying the generated C file to the checksummed base file name, then
    # storing the working directory and changing to the temp directory to
    # avoid issues with spaces in file names and paths
    file.copy(from =file.path(temp_directory, "r_ode_model.c"), 
              to   =file.path(temp_directory, c_libfile_base_c), 
              overwrite=TRUE)
    # Compiling the C file
    current_dir = getwd()
    setwd(temp_directory)
    on.exit( setwd(current_dir))
    output =  system(compile_cmd, intern=TRUE) 
    setwd(current_dir)

    if("status" %in% names(attributes(output))){
      if(verbose == TRUE){
        if(debug == TRUE){
          for(line in output){
            cli::cli_alert_danger(paste("DEBUG:", line, sep=" "))
          }
        }
        cli::cli_alert_danger("Failed: Unable to compile C file") 
        if(debug == TRUE){
          cli::cli_alert_danger("See above for more details")
        }
      }
      CFILE = FALSE
    }else{
      # Loading the shared library
      if(verbose == TRUE){
        cli::cli_alert("Loading the shared C library") }
      dyn.load(file.path(temp_directory, paste(c_libfile_base, .Platform$dynlib.ext, sep = "")))
    }
    if(verbose == TRUE){
      cli::cli_alert_success('System built')
      cli::cli_alert_info('To fetch a new analysis template use {.code system_fetch_template}')
      cli::cli_alert_info('For example:')
      cli::cli_alert_info('  fr = system_fetch_template(cfg, template = "Simulation")')
      cli::cli_alert_info('  fr = system_fetch_template(cfg, template = "Estimation")')
    }
  }else{
    if(verbose == TRUE){
      cli::cli_alert_danger(paste("Failed: file", file.path(temp_directory, c_libfile_base_c), " not found "))
    }
    CFILE = FALSE
  }
  

  if(CFILE == FALSE){
    if(verbose == TRUE){
      cli::cli_alert_warning("C model not available. Compile manually using the") 
      cli::cli_alert_warning("following command to debug:          ") 
     #cli::cli_alert_warning(paste("system('", compile_cmd,"')"))
      cli::cli_alert_warning(sprintf("system('R CMD SHLIB \"%s%sr_ode_model.c\"')", temp_directory, .Platform$file.sep))
    }
  }
  
  # Returning the ubiquity model object:
  if(file.exists(file.path(temp_directory, "auto_rcomponents.R"))){
    source(file.path(temp_directory, "auto_rcomponents.R"))
    eval(parse(text=paste0("cfg = system_fetch_cfg_", c_libfile_base, "()")))

    # storing the output directory
    cfg$options$misc$output_directory =  output_directory 
  } 
  
} else {
  cli::cli_alert_danger(paste("Still unable to find system file >", system_file,"<", sep=""))
}
return(cfg)}

# -------------------------------------------------------------------------
#'@export 
#'@title Fetch Ubiquity Workshop Sections
#'@description With the ubiquity package this function can be used to fetch
#' example files for different sections of the workshop.
#'
#'@param section Name of the section of workshop to retrieve  ("Simulation")
#'@param overwrite if \code{TRUE} the new workshop files will overwrite any existing files present (\code{FALSE})
#'@param copy_files if \code{TRUE} the files will be written to the output_directory, if \code{FALSE} only the names and locations of the files will be returned (\code{TRUE})
#'@param output_directory directory where workshop files will be placed (getwd())
#'@details Valid sections are "Simulation", "Estimation", "Titration", "Reporting", "Testing", and "NCA"
#'
#'@return list
#'@examples
#' \donttest{
#' workshop_fetch("Estimation", output_directory=tempdir(), overwrite=TRUE)
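#' # Other sections can be fetched in the same way, for example:
#' # workshop_fetch("NCA", output_directory=tempdir(), overwrite=TRUE)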
#'}
workshop_fetch <- function(section          = "Simulation", 
                           overwrite        = FALSE,
                           copy_files       = TRUE,
                           output_directory = getwd()){
  res = list()
  allowed = c("Simulation", "Estimation", "Titration", "Reporting", "Testing", "NCA")

  isgood = TRUE
  # This function only works if we're using the package
  if(!(system.file(package="ubiquity") == "") |
       dir.exists(file.path("examples", "R"))){
    if(section %in% allowed){
    
      if(!(system.file(package="ubiquity") == "") ){
        src_dir = system.file("ubinc", "scripts", package="ubiquity")
        csv_dir = system.file("ubinc", "csv",     package="ubiquity")
        sys_dir = system.file("ubinc", "systems", package="ubiquity")
      } else {
        src_dir = file.path("examples", "R")
        csv_dir = file.path("examples", "R")
        sys_dir = file.path("examples")
      }

      sources      = c()
      destinations = c()
      write_file   = c()

      if(section=="Simulation"){
         sources      = c(file.path(src_dir, "analysis_single.r"            ),
                          file.path(src_dir, "analysis_multiple.r"          ),
                          file.path(src_dir, "analysis_multiple_file.r"     ),
                          file.path(sys_dir, "system-mab_pk.txt"            ),
                          file.path(csv_dir, "mab_pk_subjects.csv"))
         destinations = c("analysis_single.r",
                          "analysis_multiple.r",
                          "analysis_multiple_file.r",
                          "system.txt",
                          "mab_pk_subjects.csv")
         write_file   = c(TRUE, TRUE, TRUE, TRUE, TRUE)
      } else if(section=="Estimation") {
         sources      = c(file.path(src_dir, "analysis_parent.r"                    ),
                          file.path(src_dir, "analysis_parent_metabolite.r"         ),
                          file.path(src_dir, "analysis_parent_metabolite_global.r"  ),
                          file.path(src_dir, "analysis_parent_metabolite_nm_data.r" ),
                          file.path(sys_dir, "system-adapt.txt"                     ),
                          file.path(csv_dir, "pm_data.csv"                          ),
                          file.path(csv_dir, "nm_data.csv"                          ))
         destinations = c("analysis_parent.r",                   
                          "analysis_parent_metabolite.r",        
                          "analysis_parent_metabolite_global.r",  
                          "analysis_parent_metabolite_nm_data.r", 
                          "system.txt",
                          "pm_data.csv",                          
                          "nm_data.csv"                                             )
         write_file   = c(TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE)
      } else if(section=="Reporting") {
         sources      = c(file.path(src_dir, "make_report_PowerPoint.R"), 
                          file.path(src_dir, "make_report_Word.R"), 
                          file.path(sys_dir, "system-mab_pk.txt"))
         destinations = c("make_report_PowerPoint.R",
                          "make_report_Word.R", 
                          "system.txt")

         write_file   = c(TRUE, TRUE, TRUE)
      } else if(section=="Titration") {
         sources      = c(file.path(src_dir, "analysis_repeat_dosing.r"                     ),
                          file.path(src_dir, "analysis_repeat_infusion.r"                   ),
                          file.path(src_dir, "analysis_state_reset.r"                       ),
                          file.path(src_dir, "analysis_visit_dosing_titration.r"            ),
                          file.path(src_dir, "analysis_visit_dosing_titration_stochastic.r" ),
                          file.path(src_dir, "analysis_visit_infusion_dosing.r"             ),
                          file.path(sys_dir, "system-mab_pk.txt"                            ))
         destinations = c("analysis_repeat_dosing.r",                    
                          "analysis_repeat_infusion.r",                  
                          "analysis_state_reset.r",                       
                          "analysis_visit_dosing_titration.r",            
                          "analysis_visit_dosing_titration_stochastic.r", 
                          "analysis_visit_infusion_dosing.r",                           
                          "system.txt")
         write_file   = c(TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE)
      } else if(section=="Testing") {
         sources      = c(file.path(src_dir, "workshop_test.R"))
         destinations = c("workshop_test.R")
         write_file   = c(TRUE)
      } else if(section=="NCA") {
         sources      = c(file.path(src_dir, "analysis_nca_md.R"            ),
                          file.path(src_dir, "analysis_nca_sd.R"            ),
                          file.path(src_dir, "analysis_nca_sparse.R"        ),
                          file.path(src_dir, "nca_generate_data.R"          ),
                          file.path(sys_dir, "system-mab_pk.txt"            ),
                          file.path(csv_dir, "pk_all_md.csv"                ),
                          file.path(csv_dir, "pk_all_sd.csv"                ),
                          file.path(csv_dir, "pk_sparse_sd.csv"))
         destinations = c("analysis_nca_md.R"      ,                        
                          "analysis_nca_sd.R"      ,                        
                          "analysis_nca_sparse.R"  ,                        
                          "nca_generate_data.R"    ,                        
                          "system-mab_pk.txt"      ,
                          "pk_all_md.csv"          ,
                          "pk_all_sd.csv"          ,
                          "pk_sparse_sd.csv")
         write_file   = rep(TRUE, length(sources))

      }

      # If overwrite is FALSE we check each of the destination files to see if
      # it exists. If it does, we set write_file to FALSE and warn the user
      # below.
      if(!overwrite){
        for(fidx in 1:length(destinations)){
          if(copy_files){
            if(file.exists(file.path(output_directory, destinations[fidx]))){
              write_file[fidx] = FALSE 
            }
          } else {
              # If we're not copying the files then we set
              # the write_file flag to FALSE
              write_file[fidx] = FALSE
          }
        }
      }

      # storing the details in res
      res$sources      = sources
      res$destinations = destinations
      res$write_file   = write_file

      # next we write the files that are TRUE
      for(fidx in 1:length(destinations)){
        if(write_file[fidx]){
          file.copy(sources[fidx], file.path(output_directory, destinations[fidx]), overwrite=TRUE)
          cli::cli_alert(paste("Creating file:", file.path(output_directory, destinations[fidx] )))
        } else {
          isgood = FALSE
          cli::cli_alert_warning(paste("File:", file.path(output_directory, destinations[fidx]), "exists, and was not copied."))
          cli::cli_alert_warning(      "Set overwrite=TRUE to force this file to be copied.")
        }
      }
    } else {
      isgood = FALSE
      cli::cli_alert_danger(paste("section >", section, "< is not valid must be one of: ", paste(allowed, collapse=", "), sep=""))
    }

  } else {
    isgood = FALSE
    cli::cli_alert("workshop_fetch()")
    cli::cli_alert("Unable to find ubiquity package or stand alone distribution files")
  }


  res$isgood = isgood

return(res)}
# -------------------------------------------------------------------------
#'@export
#'@title Create New \code{system.txt} File 
#'
#'@description  Copy a blank template (\code{system_file="template"}) file to the working directory or an example by specifying the following:
#'
#' \itemize{
#'   \item \code{"template"} - Empty system file template
#'   \item \code{"adapt"} - Parent/metabolite model taken from the adapt manual used in estimation examples [ADAPT]
#'   \item \code{"two_cmt_cl"} - Two compartment model parameterized in terms of clearances 
#'   \item \code{"one_cmt_cl"} - One compartment model parameterized in terms of clearances 
#'   \item \code{"two_cmt_micro"} - Two compartment model parameterized in terms of rates (micro constants)
#'   \item \code{"one_cmt_micro"} - One compartment model parameterized in terms of rates (micro constants)
#'   \item \code{"mab_pk"} - General compartmental model of mAb PK from Davda 2014 [DG]
#'   \item \code{"pbpk"} - PBPK model of mAb disposition in mice from Shah 2012 [SB]
#'   \item \code{"pbpk_template"} - System parameters from Shah 2012 [SB] have been defined for all species along with the set notation to be used as a template for developing models with physiological parameters
#'   \item \code{"pwc"} - Example showing how to make if/then or piece-wise continuous variables  
#'   \item \code{"tmdd"} - Model of antibody with target-mediated drug disposition
#'   \item \code{"tumor"} - Transit tumor growth model taken from Lobo 2002 [LB] 
#' }
#'
#'@param file_name name of the new file to create   
#'@param system_file name of the system file to copy
#'@param overwrite if \code{TRUE} the new system file will overwrite any existing files present
#'@param output_directory \code{getwd()} directory where system file will be placed
#'
#'@return \code{TRUE} if the new file was created and \code{FALSE} otherwise
#'
#' @details 
#'
#' References
#'
#' \itemize{
#' \item{[ADAPT]} Adapt 5 Users Guide \url{https://bmsr.usc.edu/files/2013/02/ADAPT5-User-Guide.pdf}
#' \item{[DG]} Davda et. al. mAbs (2014) 6(4):1094-1102  \doi{10.4161/mabs.29095}
#' \item{[LB]} Lobo, E.D. & Balthasar, J.P. AAPS J (2002) 4, 212-222  \doi{10.1208/ps040442}
#' \item{[SB]} Shah, D.K. & Betts, A.M. JPKPD (2012) 39 (1), 67-86 \doi{10.1007/s10928-011-9232-2}
#'}
#'
#'
#'
#'@examples
#' \donttest{
#' # To create an example system file named example_system.txt:
#' system_new(system_file      = "mab_pk", 
#'            file_name        = "system_example.txt", 
#'            overwrite        = TRUE,  
#'            output_directory = tempdir())
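#'
#' # The names of all available system file templates can be listed with:
#' # systems = system_new_list()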
#'}
system_new  <- function(file_name        = "system.txt", 
                        system_file      ="template", 
                        overwrite        = FALSE,  
                        output_directory = getwd()){

 # Getting a list of the system files
 sfs = system_new_list()

 isgood = FALSE

 output_file = file.path(output_directory, file_name)

 if(system_file %in% names(sfs)){
   write_file = TRUE
   # if overwrite is false we check to see if the destination file exists. If it
   # does exist, we set write_file to false
   if(!overwrite){
     if(file.exists(output_file)){
       cli::cli_alert_danger(paste("Error the file >", output_file, "< exists set overwrite=TRUE to overwrite", sep=""))
       write_file = FALSE}
   }

    # if the source file exists and write_file is true then
    # we try to copy the file
    file_path = sfs[[system_file]][["file_path"]]
    if(file.exists(file_path) & write_file){
      isgood = file.copy(file_path, output_file, overwrite=TRUE)
    }
 } else{
   cli::cli_alert_danger(paste("The system file tempalte >", system_file, "< is invalid", sep=""))
   cli::cli_alert_danger(paste("Please choose one of the following:", sep=""))
   for(sf in names(sfs)){
     cli::cli_alert_danger(paste(stringr::str_pad(sf, pad=" ", side="right", width=15), "| ", sfs[[sf]][["description"]], sep=""))
   }
 }
isgood}
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
#'@export
#'@title Fetch List of Available System Templates
#'
#'@description  Returns a list of internal templates with descriptions of their contents and file locations
#'
#' @return list with the template names as the keys
#' \itemize{
#' \item{file_path} Full path to the system file
#' \item{description} Description of what this system file provides
#'}
#'
#'@examples
#' # To get a list of systems
#' systems = system_new_list()
system_new_list  <- function(){

 sfs = list(template      = list(file_path = NULL, description="Empty template file."),      
            mab_pk        = list(file_path = NULL, description="Human antibody compartmental PK with IIV (Davda 2014)"),         
            pbpk          = list(file_path = NULL, description="Full antibody PBPK model (Shah 2012)"),          
            pwc           = list(file_path = NULL, description="Example of how to define piecewise continuous functions (if/then statements)"), 
            pbpk_template = list(file_path = NULL, description="Template file with PBPK parameters defined for multiple species using mathematical set notation"), 
            tumor         = list(file_path = NULL, description="Tumor inhibition model (Lobo 2002) with mathematical set examples"),  
            tmdd          = list(file_path = NULL, description="Full TMDD model with examples of how to code the same system as both ODEs and processes"),          
            adapt         = list(file_path = NULL, description="Parent metabolite model taken from the Adapt user manual"),          
            one_cmt_micro = list(file_path = NULL, description="One compartment model with micro-constants"), 
            one_cmt_cl    = list(file_path = NULL, description="One compartment model with clearances"),  
            two_cmt_micro = list(file_path = NULL, description="Two compartment model with micro-constants"), 
            two_cmt_cl    = list(file_path = NULL, description="Two compartment model with clearances"))  

  for(system_file in names(sfs)){

    # If the package is installed we pull it from there:
    if((system.file(package="ubiquity") != "")){
      if(system_file == "template"){
        file_path       = system.file("ubinc",    "templates", "system_template.txt", package="ubiquity")
      } else {
        file_path       = system.file("ubinc",    "systems", sprintf('system-%s.txt',system_file), package="ubiquity")
      }
    } 
    else {
      if(system_file == "template"){
        file_path       = file.path('library', 'templates',  'system_template.txt')
      } else {
        file_path       = file.path('examples', sprintf('system-%s.txt',system_file))
      }
    }


    # storing the file path
    sfs[[system_file]][["file_path"]] = file_path
  }

sfs}

# -------------------------------------------------------------------------

# -------------------------------------------------------------------------

#'@export
#'@title Create New Analysis Template 
#'
#'@description Building a system file will produce templates for R and other languages.
#' This function provides a method to make local copies of these templates.
#'
#'@param cfg ubiquity system object    
#'@param template template type  
#'@param overwrite if \code{TRUE} the template files will overwrite any existing files present
#'@param output_directory directory where the template files will be placed (getwd())
#'
#'@return List with vectors of template \code{sources}, \code{destinations}
#' and corresponding write success (\code{write_file}), also a list element
#' indicating the overall success of the function call (\code{isgood})
#'
#'@details The template argument can have the following values for the R
#'workflow:
#'
#' \itemize{
#'  \item{"Simulation"}       produces \code{analysis_simulate.R}: R-Script named with placeholders used to run simulations
#'  \item{"Estimation"}       produces \code{analysis_estimate.R}: R-Script named with placeholders used to perform naive-pooled parameter estimation
#'  \item{"NCA"}              produces \code{analysis_nca.R}: R-Script to perform non-compartmental analysis (NCA) and report out the results
#'  \item{"ShinyApp"}         produces \code{ubiquity_app.R}, \code{server.R} and \code{ui.R}: files needed to run the model through a Shiny App either locally or on a Shiny Server
#'  \item{"Model Diagram"}    produces \code{system.svg}: SVG template for producing a model diagram (Goto \url{https://inkscape.org} for a free SVG editor)
#'  \item{"Shiny Rmd Report"} produces \code{system_report.Rmd} and \code{test_system_report.R}: R-Markdown file used to generate report tabs for the Shiny App and a script to test it
#'  \item{"myOrg"}            produces \code{myOrg.R}: R-Script for defining functions used within your organization
#'}
#'
#'And this will create files to use in other software:
#'
#' \itemize{
#'  \item{"Adapt"}            produces \code{system_adapt.for} and \code{system_adapt.prm}: Fortran and parameter files for the currently selected parameter set in Adapt format.
#'  \item{"Berkeley Madonna"} produces \code{system_berkeley_madonna.txt}: text file with the model and the currently selected parameter set in Berkeley Madonna format
#'  \item{"nlmixr"}           produces \code{system_nlmixr.R} For the currently selected parameter set to define the system in the `nlmixr` format.
#'  \item{"NONMEM"}           produces \code{system_nonmem.R} For the currently selected parameter set as a NONMEM conntrol stream.
#'  \item{"Monolix"}          produces \code{system_monolix.txt} For the currently selected parameter set as a NONMEM conntrol stream.
#'  \item{"mrgsolve"}         produces \code{system_mrgsolve.cpp}: text file with the model and the currently selected parameter set in mrgsolve format  
#'}
#'
#'
#'@examples
#' \donttest{
#' # Creating a system file from the mab_pk example
#' fr = system_new(file_name        = "system.txt", 
#'                 system_file      = "mab_pk", 
#'                 overwrite        = TRUE, 
#'                 output_directory = tempdir())
#'
#' # Building the system 
#' cfg = build_system(system_file  = file.path(tempdir(), "system.txt"),
#'       output_directory          = file.path(tempdir(), "output"),
#'       temporary_directory       = tempdir())
#'
#' # Creating a simulation template
#' fr =  system_fetch_template(cfg, 
#'       template         = "Simulation", 
#'       output_directory = tempdir())
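#'
#' # Templates for other targets (e.g. mrgsolve) can be fetched the same way:
#' # fr =  system_fetch_template(cfg, 
#' #       template         = "mrgsolve", 
#' #       output_directory = tempdir())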
#'}
system_fetch_template  <- function(cfg, template="Simulation", overwrite=FALSE, output_directory=getwd()){

 res = list()
 # These are the allowed templates:
 allowed = c("Simulation", "Estimation", 
             "ShinyApp",   "Shiny Rmd Report",
             "NCA", 
             "mrgsolve",   
             "myOrg", 
             "Model Diagram",
             "Berkeley Madonna", 
             "Adapt", "nlmixr",
             "NONMEM", "Monolix",
             "mrgsolve")


 # default value for the return variable
 isgood = TRUE 

 if(template %in% allowed){
   # first we look to see if the package is installed, if it's not
   # we look for the system_template.txt file 
   if((system.file(package="ubiquity") != "")){
     template_dir = system.file("ubinc", "templates", package="ubiquity")
   } 
   else {
     template_dir = file.path('library', 'templates')
   }
   temp_directory = cfg[["options"]][["misc"]][["temp_directory"]]

   # pulling the current parameter set
   current_set = cfg[["parameters"]][["current_set"]]
  
   # building up the lists of sources and destinations
   sources      = c()
   destinations = c()
   write_file   = c()

   if(template == "Simulation"){
     sources      = c(file.path(temp_directory, "auto_simulation_driver.R"))
     destinations = c("analysis_simulate.R")
     write_file   = c(TRUE)
   }
   if(template == "Estimation"){
     sources      = c(file.path(temp_directory, "auto_analysis_estimation.R"))
     destinations = c("analysis_estimate.R")
     write_file   = c(TRUE)
   }
   if(template == "NCA"){
     sources      = c(file.path(template_dir, "r_nca.R"))
     destinations = c("analysis_nca.R")
     write_file   = c(TRUE)
   }
   if(template == "ShinyApp"){
     sources      = c(file.path(template_dir, "ubiquity_app.R"), 
                      file.path(template_dir, "ubiquity_server.R"),
                      file.path(template_dir, "ubiquity_ui.R"))
     destinations = c("ubiquity_app.R", "server.R", "ui.R")
     write_file   = c(TRUE, TRUE, TRUE)
   }
   if(template == "Shiny Rmd Report"){
     sources      = c(file.path(template_dir, "r_system_report.Rmd"),
                      file.path(template_dir, "r_test_rmd.R"))
     destinations = c("system_report.Rmd", "test_system_report.R")
     write_file   = c(TRUE, TRUE)
   }
   if(template == "mrgsolve"){
     sources      = c(file.path(temp_directory, sprintf("target_mrgsolve-%s.cpp",current_set)))
     destinations = c("system_mrgsolve.cpp")
     write_file   = c(TRUE)
   }
   if(template == "Berkeley Madonna"){
     sources      = c(file.path(temp_directory, sprintf("target_berkeley_madonna-%s.txt",current_set)))
     destinations = c("system_berkeley_madonna.txt")
     write_file   = c(TRUE)
   }
   if(template == "Adapt"){
     sources      = c(file.path(temp_directory, sprintf("target_adapt_5.for")),
                      file.path(temp_directory, sprintf("target_adpat_5-%s.prm",current_set)))
     destinations = c("system_adapt.for", "system_adapt.prm")
     write_file   = c(TRUE, TRUE)
   }
   if(template == "myOrg"){
     sources      = c(file.path(template_dir, sprintf("report.yaml")))
     destinations = c("myOrg.yaml")
     write_file   = c(TRUE)
   }

   if(template == "Model Diagram"){
     sources      = c(file.path(template_dir, sprintf("system.svg")))
     destinations = c("system.svg")
     write_file   = c(TRUE)
   }
   if(template == "NONMEM"){
     sources      = c(file.path(temp_directory, sprintf("target_nonmem-%s.ctl",current_set)))
     destinations = c("system_nonmem.ctl")
     write_file   = c(TRUE)
   }
   if(template == "Monolix"){
     sources      = c(file.path(temp_directory, sprintf("target_monolix-%s.txt",current_set)))
     destinations = c("system_monolix.txt")
     write_file   = c(TRUE)
   }
   if(template == "nlmixr"){
     sources      = c(file.path(temp_directory, sprintf("target_nlmixr-%s.R",current_set)))
     destinations = c("system_nlmixr.R")
     write_file   = c(TRUE)
   }

   # If overwrite is FALSE we check each of the destination files to see if
   # it exists. If it does, we set write_file to FALSE and warn the user
   # below.
   if(!overwrite){
     for(fidx in 1:length(destinations)){
       if(file.exists(file.path(output_directory, destinations[fidx]))){
         write_file[fidx] = FALSE 
       }
     }
   }

   # storing the details in res
   res$sources      = sources
   res$destinations = destinations
   res$write_file   = write_file
  

   # next we write the files that are TRUE
   for(fidx in 1:length(destinations)){
     if(write_file[fidx]){
       file.copy(sources[fidx], file.path(output_directory, destinations[fidx]), overwrite=TRUE)
       vp(cfg, sprintf("Creating file: %s", file.path(output_directory, destinations[fidx])))
     } else {
       isgood = FALSE
       vp(cfg, sprintf("File: %s, exists, and was not copied.", file.path(output_directory, destinations[fidx])))
       vp(cfg, sprintf("Set overwrite=TRUE to force this file to be copied."))
     }
   }
 } else {
   isgood = FALSE
   vp(cfg, sprintf("Template type: %s not recognized", template))
   vp(cfg, sprintf(" must be one of: %s", paste(allowed, collapse=', ')))
 }

  if(!isgood){
    vp(cfg, "ubiquity::system_fetch_template()")
    vp(cfg, "One or more templates failed to copy. See messages above for details")
  }
  res$isgood = isgood
return(res)}
# -------------------------------------------------------------------------
# cfg = system_load_data(cfg, dsname, data_file, data_sheet)
#
#'@export
#'@title Loading Datasets 
#'@description Loads datasets at the scripting level from  a variable if
#' \code{data_file} is a data.frame or from the following
#' formats (based on the file extension)
#'\itemize{
#' \item csv - comma delimited 
#' \item tab - tab delimited
#' \item xls or xlsx - Excel spreadsheet
#'}
#'
#' Multiple datasets can be loaded as long as they are given different
#' names. Datasets should be in a NONMEM-ish format with the
#' first row containing the column header names.
#'
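#'@details As a sketch of typical use, assuming a comma delimited file
#' \code{pm_data.csv} is present in the working directory and \code{cfg} was
#' created with \code{build_system()}:
#'
#'\preformatted{
#'cfg = system_load_data(cfg, 
#'                       dsname    = "pm_data",
#'                       data_file = "pm_data.csv")
#'}
#'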
#'@param cfg ubiquity system object    
#'@param dsname short name of the dataset to be used to link this dataset to different operations
#'@param data_file the file name of the dataset or a data frame containing the data 
#'@param data_sheet argument identifying the name of the sheet in an excel file
#' 
#'@return Ubiquity system object with the dataset loaded
system_load_data <- function(cfg, dsname, data_file, data_sheet){

  if(is.data.frame(data_file)){
    cfg$data[[dsname]]$values = data_file
    cfg$data[[dsname]]$data_file$name  = "From data frame"
  }
  else{
    # Reading the data based on the file extension
    if(file.exists(data_file)){
      if(regexpr(".xls$", as.character(data_file), ignore.case=TRUE) > 0){
        cfg$data[[dsname]]$values = as.data.frame(readxl::read_xls(path=data_file, sheet=data_sheet))
        cfg$data[[dsname]]$data_file$sheet  = data_sheet
      }

      if(regexpr(".xlsx$", as.character(data_file), ignore.case=TRUE) > 0){
        cfg$data[[dsname]]$values = as.data.frame(readxl::read_xlsx(path=data_file, sheet=data_sheet))
        cfg$data[[dsname]]$data_file$sheet  = data_sheet
      }


      if(regexpr(".csv$", as.character(data_file), ignore.case=TRUE) > 0){
        cfg$data[[dsname]]$values = read.csv(data_file, header=TRUE)
      }

      if(regexpr(".tab$", as.character(data_file), ignore.case=TRUE) > 0){
        cfg$data[[dsname]]$values = read.delim(data_file, header=TRUE)
      }
      cfg$data[[dsname]]$data_file$name  = data_file
    } else {
      vp(cfg, " ------------------------------------") 
      vp(cfg, "ubiquity::system_load_data()") 
      vp(cfg, sprintf("unable to find the specified file >%s<", data_file)) 
      vp(cfg, " ------------------------------------")
    
    }
  }

  return(cfg)
}


#'@export
#'@title Selecting Parameter Sets
#'@description The system file can contain multiple parameterizations using
#' the \code{<PSET:?:?>?} notation. This function provides the means for
#' switching between these parameterizations, and (optionally) specifying a
#' subset of parameters estimated when performing parameter estimation. 
#'
#'@param cfg ubiquity system object    
#'@param set_name string containing the name of the parameter set
#'@param parameter_names list of parameter names to be estimated 
#'
#'@return Ubiquity system object with the specified parameter set active
#'
#'@examples
#' \donttest{
#' # Creating a system file from the mab_pk example
#' fr = system_new(file_name        = "system.txt", 
#'                 system_file      = "mab_pk", 
#'                 overwrite        = TRUE, 
#'                 output_directory = tempdir())
#' 
#' # Building the system 
#' cfg = build_system(system_file  = file.path(tempdir(), "system.txt"),
#'       output_directory          = file.path(tempdir(), "output"),
#'       temporary_directory       = tempdir())
#' 
#' # Selecting the default parameter set
#' cfg = system_select_set(cfg, "default")
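#'
#' # A subset of parameters can also be flagged for estimation. The names
#' # used here (CL, Vc) are only placeholders and must match parameters
#' # defined in the system file:
#' # cfg = system_select_set(cfg, "default", parameter_names = c("CL", "Vc"))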
#'}
system_select_set = function(cfg, set_name='default', parameter_names=NULL){
#
# takes the system information variable cfg and makes the values in the string
# 'set name'  the active values
#

# defining parameters for the current set
if(is.null(cfg$parameters$sets[[set_name]])){
  vp(cfg,sprintf('Warning: Could not find set: %s', set_name))
  vp(cfg,sprintf('   Returning the default set instead'))
  set_name = 'default'
  cfg$parameters$matrix$value = cfg$parameters$sets$default$values
  cfg$parameters$current_set  = 'default'
  }

  cfg$parameters$matrix$value  = cfg$parameters$sets[[set_name]]$values
  cfg$parameters$current_set   = set_name
  p_idx = 1
  for(p_name in names(cfg$options$mi$parameters)){
    eval(parse(text=sprintf('cfg$parameters$values$%s = cfg$parameters$matrix$value[[p_idx]]', p_name)))
    p_idx = 1+p_idx
  }

  cfg$parameters$values = as.data.frame(cfg$parameters$values);

# checking to make sure the values specified in parameter_names are 
# actual parameters :)
if(!is.null(parameter_names)){
  # parameter names selected for estimation that do not exist
  mpn = setdiff(parameter_names, names(cfg$options$mi$parameters))
  if(length(mpn) > 0){
    parameter_names = NULL
    vp(cfg, sprintf('The following parameters were selected'))
    vp(cfg, sprintf('to be estimated but have not been defined:'))
    vp(cfg, sprintf('  %s', paste(mpn, collapse=',                ')))
    vp(cfg, sprintf('Check your spelling or create  this parameter '))
    vp(cfg, sprintf('in the system file using the <P> descriptor   '))
    vp(cfg, sprintf('Defaulting to _ALL_ parameters being estimated'))
  }

}

# if the parameter_names list is null we select them all for estimation
if(is.null(parameter_names)){
  parameter_names = names(cfg$options$mi$parameters)
}

tmp_to_estimate_system   = c()
tmp_to_estimate_variance = c() 

# ordering the parameters system and then variance
for(p_name in parameter_names){
  if(cfg$parameters$matrix[cfg$parameters$matrix$name == p_name, ]$ptype == "system"){
    tmp_to_estimate_system = c(tmp_to_estimate_system, p_name) }
  else{
    tmp_to_estimate_variance = c(tmp_to_estimate_variance, p_name) }
}

tmp_to_estimate_all = c(tmp_to_estimate_system, tmp_to_estimate_variance)


# setting objective function type:
if(length(tmp_to_estimate_variance) == 0){
  cfg$estimation$objective_type = 'wls' }
else{
  cfg$estimation$parameters$system = length(tmp_to_estimate_system);
  cfg$estimation$objective_type = 'ml' }


cfg$estimation$parameters$matrix = c()

#
# Storing the parameter information for estimation
# this is a reduced set of parameters (those that are being estimated)
p_idx = 1
# Initializing the guess list
cfg$estimation$parameters$guess = list()
cfg$estimation$mi               = list()
for(p_name in tmp_to_estimate_all){
  # matrix
  cfg$estimation$parameters$matrix = 
       rbind(cfg$estimation$parameters$matrix, cfg$parameters$matrix[cfg$parameters$matrix$name ==  p_name, ])
  # indices for mapping
  cfg$estimation$mi[[p_name]] = p_idx
  # vector of guesses
  eval(parse(text=sprintf('cfg$estimation$parameters$guess$%s = cfg$parameters$values$%s', p_name, p_name)))
  p_idx = p_idx + 1;
}

cfg$estimation$parameters$guess = unlist(as.data.frame(cfg$estimation$parameters$guess))


# defining covariates
for(cov_name in names(cfg$options$inputs$covariates)){
  # checking to see if the current covariate (cov_name) has a value specified
  # for the current parameter set (set_name). If it doesn't then the default
  # is used. If it does then these parameter set specific values are used
  if(is.null(cfg$options$inputs$covariates[[cov_name]]$parameter_sets[[set_name]])){
    cfg$options$inputs$covariates[[cov_name]]$times$values  = cfg$options$inputs$covariates[[cov_name]]$parameter_sets$default$times
    cfg$options$inputs$covariates[[cov_name]]$values$values = cfg$options$inputs$covariates[[cov_name]]$parameter_sets$default$values
  }
  else{
    cfg$options$inputs$covariates[[cov_name]]$times$values  = cfg$options$inputs$covariates[[cov_name]]$parameter_sets[[set_name]]$times
    cfg$options$inputs$covariates[[cov_name]]$values$values = cfg$options$inputs$covariates[[cov_name]]$parameter_sets[[set_name]]$values
  }
}


# defining the iivs
if(!is.null(cfg$iiv)){
  if(set_name %in% names(cfg$iiv$sets)){
    iiv_set_name = set_name }
  else{
    iiv_set_name = 'default' }
   
  # indices
  cfg$options$mi$iiv  = cfg$options$mi$iiv_sets[[iiv_set_name]]

  # iiv details
  cfg$iiv$current_set = iiv_set_name
  cfg$iiv$iivs        = cfg$iiv$sets[[iiv_set_name]]$iivs 
  cfg$iiv$parameters  = cfg$iiv$sets[[iiv_set_name]]$parameters
  cfg$iiv$values      = cfg$iiv$sets[[iiv_set_name]]$values
}

return(cfg)
}


# parameters = system_fetch_parameters(cfg)
#
#'@export
#'@title Fetch System Parameters
#'
#'@description
#' Fetch the parameters of the currently selected parameter set. To switch
#' between parameter sets use \code{\link{system_select_set}}
#'
#'@param cfg ubiquity system object    
#'
#'@return List of parameters for the selected parameter set
#'
#'@examples
#' \donttest{
#' # Creating a system file from the mab_pk example
#' fr = system_new(file_name        = "system.txt", 
#'                 system_file      = "mab_pk", 
#'                 overwrite        = TRUE, 
#'                 output_directory = tempdir())
#' 
#' # Building the system 
#' cfg = build_system(system_file  = file.path(tempdir(), "system.txt"),
#'       output_directory          = file.path(tempdir(), "output"),
#'       temporary_directory       = tempdir())
#'
#' # Fetching the default parameter set
#' parameters = system_fetch_parameters(cfg)
#'}
system_fetch_parameters <- function(cfg){
  return(cfg$parameters$values)}

#'@export
#'@title Fetch Mathematical Set 
#'
#'@description
#' Fetch the elements of the specified mathematical set that was defined in the system file.
#'
#'@param cfg ubiquity system object    
#'@param set_name name of mathematical set
#'
#'@return A sequence containing the elements of the specified mathematical set or NULL if there was a problem.
#'
#'@examples
#' \donttest{
#' # Creating a system file from the pbpk example
#' fr = system_new(file_name        = "system.txt", 
#'                 system_file      = "pbpk", 
#'                 overwrite        = TRUE, 
#'                 output_directory = tempdir())
#' 
#' # Building the system 
#' cfg = build_system(system_file  = file.path(tempdir(), "system.txt"),
#'       output_directory          = file.path(tempdir(), "output"),
#'       temporary_directory       = tempdir())
#'
#' # Fetching the contents of the ORG mathematical set
#' ORG_elements = system_fetch_set(cfg, "ORG")
#'}
system_fetch_set <- function(cfg, set_name=NULL){
  set_contents = NULL
  isgood = TRUE

  if(set_name %in% names(cfg$options$math_sets)){
    set_contents = cfg$options$math_sets[[set_name]]
  } else {
    isgood = FALSE
    vp(cfg, paste("Error: mathematical set: >", set_name ,"< was not defined", sep=""))
    if(length(names(cfg$options$math_sets)) > 0){
      vp(cfg, paste("The following sets are defined for this system")) 
      vp(cfg, paste(names(cfg$options$math_sets), collapse=", "))
    } else {
      vp(cfg, "There are no sets defined for this system") }
  }

  if(!isgood){
    vp(cfg, "ubiquity::system_fetch_set()")
  }
  
  return(set_contents)}

#'@export
#'@title Fetch Variability Terms
#'@description Extract elements of the current variance/covariance matrix
#' specified in the system file with \code{<IIV:?:?> ?}, \code{<IIVCOR:?:?>?}, \code{<IIVSET:?:?> ?}, \code{<IIVCORSET:?:?>?}
#'
#'@param cfg ubiquity system object    
#'@param IIV1 row name of the variance/covariance matrix
#'@param IIV2 column name of the variance/covariance matrix 
#'
#'@return Value from the variance/covariance matrix   
#'@examples
#' \donttest{
#' # Creating a system file from the mab_pk example
#' fr = system_new(file_name        = "system.txt", 
#'                 system_file      = "mab_pk", 
#'                 overwrite        = TRUE, 
#'                 output_directory = tempdir())
#' 
#' # Building the system 
#' cfg = build_system(system_file  = file.path(tempdir(), "system.txt"),
#'       output_directory          = file.path(tempdir(), "output"),
#'       temporary_directory       = tempdir())
#'
#' # Covariance term for ETACL and ETAVc
#' val = system_fetch_iiv(cfg, IIV1="ETACL", IIV2="ETAVc")
#'}
#'@seealso \code{\link{system_set_iiv}}
system_fetch_iiv <- function(cfg, IIV1, IIV2){
  
  VALUE =  -1
  if("iiv" %in% names(cfg)){
    
    IIV1_idx = match(c(IIV1), names(cfg$iiv$iivs))
    IIV2_idx = match(c(IIV2), names(cfg$iiv$iivs))
    
    
    if(is.na(IIV1_idx)){
      vp(cfg, sprintf("IIV %s not found", IIV1)) 
    }else if(is.na(IIV2_idx)){
      vp(cfg, sprintf("IIV %s not found", IIV2)) 
    }else{
      VALUE =  cfg$iiv$values[IIV1_idx, IIV2_idx]
    }
  } else {
    vp(cfg, "ubiquity::system_fetch_iiv() ")
    vp(cfg, "No IIV information was found") 
    vp(cfg, "These can be specified using: ") 
    vp(cfg, "<IIV:?>, <IIV:?:?>, and <IIVCOR:?:?> ")
  }
return(VALUE)}


#'@export
#'@title Zero All Model Inputs
#'@description Multiple default inputs can be specified in the system file. At
#' the scripting level this function can be used to set all inputs to zero.
#' Then only the subsequently specified inputs will be applied.
#'
#'@param cfg ubiquity system object    
#'@param bolus Boolean value indicating whether bolus inputs should be set to zero
#'@param rates Boolean value indicating whether infusion rate inputs should be set to zero
#'
#'@return Ubiquity system object with the specified inputs set to zero
#'
#'@examples
#' \donttest{
#' # Creating a system file from the mab_pk example
#' fr = system_new(file_name        = "system.txt", 
#'                 system_file      = "mab_pk", 
#'                 overwrite        = TRUE, 
#'                 output_directory = tempdir())
#' 
#' # Building the system 
#' cfg = build_system(system_file  = file.path(tempdir(), "system.txt"),
#'       output_directory          = file.path(tempdir(), "output"),
#'       temporary_directory       = tempdir())
#'
#' # Clear only infusion rates
#' cfg = system_zero_inputs(cfg, bolus=TRUE, rates=FALSE)
#'
#' # Clear all inputs:
#' cfg = system_zero_inputs(cfg)
#'}
#'@seealso \code{\link{system_set_rate}}, \code{\link{system_set_bolus}}
system_zero_inputs <- function(cfg, bolus=TRUE, rates=TRUE){
  # zeroing out the bolus values
  if(bolus == TRUE){
    if('bolus' %in% names(cfg$options$inputs)){
      # first we add a dummy bolus at time 0
      cfg$options$inputs$bolus$times$values = c(0)
      # now we add a zero bolus for each species
      for(species in names(cfg$options$inputs$bolus$species)){
        cfg$options$inputs$bolus$species[[species]]$values = c(0)
      }
    }
  }
  
  # next we zero out all of the rate inputs as well
  if(rates == TRUE){
    for(rate    in  names(cfg$options$inputs$infusion_rates)){
      cfg$options$inputs$infusion_rates[[rate]]$times$values  = c(0)
      cfg$options$inputs$infusion_rates[[rate]]$levels$values = c(0)
    }
  }
return(cfg)}

#'@export
#'@title Set Covariate Values
#'@description Covariates specified in the system file using  \code{<CV:?>}
#' and \code{<CVSET:?:?>} will have their default values for a given parameter
#' set. This function is a means to overwrite those values.
#'
#'@param cfg ubiquity system object    
#'@param covariate name of the covariate
#'@param times list of times (system time units)
#'@param values corresponding list of values 
#'
#'@return Ubiquity system object with the covariate set
#'
#'@examples
#' \donttest{
#' # Creating a system file from the mab_pk example
#' fr = system_new(file_name        = "system.txt", 
#'                 system_file      = "mab_pk", 
#'                 overwrite        = TRUE, 
#'                 output_directory = tempdir())
#' 
#' # Building the system 
#' cfg = build_system(system_file  = file.path(tempdir(), "system.txt"),
#'       output_directory          = file.path(tempdir(), "output"),
#'       temporary_directory       = tempdir())
#'
#' # Setting the covariate WT to 50
#' cfg = system_set_covariate(cfg, 
#'                            covariate = "WT",
#'                            times     = c(0), 
#'                            values    = c(50))
#'}
system_set_covariate <- function(cfg, covariate, times, values){
  isgood = TRUE
  if(!(length(times) == length(values)) ) {
    vp(cfg, "The times and values have differnt lengths") 
    isgood = FALSE
    }
  if(!(covariate %in% names(cfg$options$inputs$covariates))){
    vp(cfg, sprintf("The covariate name %s could not be found", covariate)) 
    isgood = FALSE
  }
  if(isgood){
    cfg$options$inputs$covariates[[covariate]]$times$values  = times 
    cfg$options$inputs$covariates[[covariate]]$values$values = values
  } else {
    vp(cfg, sprintf(" Something went wrong and the covariate, ")) 
    vp(cfg, sprintf(" was not set, see the messages above.")) }

return(cfg)}


#'@export
#'@title Set Infusion Rate Inputs
#'@description Defines infusion rates specified in the system file using  \code{<R:?>}
#'
#'@param cfg ubiquity system object    
#'@param rate name of infusion rate    
#'@param times list of time values   
#'@param levels corresponding list of infusion values   
#'
#'@return Ubiquity system object with the infusion rate set
#'
#'@examples
#' \donttest{
#' # Creating a system file from the mab_pk example
#' fr = system_new(file_name        = "system.txt", 
#'                 system_file      = "mab_pk", 
#'                 overwrite        = TRUE, 
#'                 output_directory = tempdir())
#' 
#' # Building the system 
#' cfg = build_system(system_file  = file.path(tempdir(), "system.txt"),
#'       output_directory          = file.path(tempdir(), "output"),
#'       temporary_directory       = tempdir())
#'
#' # Clearing all inputs
#' cfg = system_zero_inputs(cfg)
#'
#' # 5 minute infusion at 10 mg/min
#' cfg = system_set_rate(cfg,
#'            rate   = "Dinf",
#'            times  = c(0,  5), 
#'            levels = c(10, 0))
#'}
#'@seealso \code{\link{system_zero_inputs}}
system_set_rate <- function(cfg, rate, times, levels){
  isgood = TRUE
  if(!(length(times) == length(levels)) ) {
    vp(cfg, "The times and levels have differnt lengths") 
    isgood = FALSE
    }
  if(!(rate %in% names(cfg$options$inputs$infusion_rates))){
    vp(cfg, sprintf("The rate name %s could not be found", rate)) 
    isgood = FALSE
  }
  if(isgood){
    cfg$options$inputs$infusion_rates[[rate]]$times$values  = times 
    cfg$options$inputs$infusion_rates[[rate]]$levels$values = levels
  } else {
    vp(cfg, "Something went wrong and the rate, ") 
    vp(cfg, "was not set, see the messages above.") }
return(cfg)}

#'@export
#'@title Setting Analysis Options
#'@description Different options associated with performing analyses (e.g. running
#' simulations, performing parameter estimation, logging, etc.) can be set
#' with this function
#'
#'@param cfg ubiquity system object    
#'@param group options are grouped together by the underlying activity being performed: "estimation",  "general", "logging", "simulation", "solver", "stochastic", or "titration"
#'@param option for each group there are a set of options 
#'@param value corresponding value for the option 
#'
#'@return Ubiquity system object with the option set
#'
#'@details 
#'
#' \bold{\code{group="estimation"}}
#'
#' The default estimation in R is performed using either the \code{optim} or \code{optimx} libraries.
#' This is selected by setting the \code{optimizer} option:
#'  
#' \preformatted{
#'cfg = system_set_option(cfg, 
#'                        group  = "estimation",
#'                        option = "optimizer",
#'                        value  = "optim")
#' }
#'  
#' The optimization routine is then specified using the \code{method} option. By default this option is
#' set to \code{Nelder-Mead}.
#'  
#' \preformatted{
#'cfg = system_set_option(cfg, 
#'                        group  = "estimation",
#'                        option = "method",
#'                        value  = "Nelder-Mead")
#' }
#'  
#' Different attributes are then specified using the \code{control} option.
#'  
#' \preformatted{
#'cfg = system_set_option(cfg, 
#'                        group  = "estimation",
#'                        option = "control",
#'                        value  = list(trace  = TRUE,
#'                                      maxit  = 500,
#'                                      REPORT = 10))
#' }
#' 
#' For the different methods and control options, see the documentation for the \code{optim}
#' and \code{optimx} libraries.
#'
#' To perform a global optimization you can install either the particle swarm (\code{pso})
#' or genetic algorithm (\code{GA}) libraries.
#' To use the particle swarm set the \code{optimizer} and \code{method}:
#'  
#' \preformatted{
#'cfg = system_set_option(cfg, 
#'                        group  = "estimation",
#'                        option = "optimizer",
#'                        value  = "pso")
#'
#'cfg = system_set_option(cfg, 
#'                        group  = "estimation",
#'                        option = "method",
#'                        value  = "psoptim")
#' }
#' 
#' The control option is a list described in the \code{pso} documentation.
#'
#' To use the genetic algorithm set the optimizer and method:
#' 
#' \preformatted{
#'cfg = system_set_option(cfg, 
#'                        group  = "estimation",
#'                        option = "optimizer",
#'                        value  = "ga")
#'
#'cfg = system_set_option(cfg, 
#'                        group  = "estimation",
#'                        option = "method",
#'                        value  = "ga")
#' }
#' 
#' The control option is a list and the list elements are the named options in the GA
#' documentation. Use the following as an example:
#' 
#' \preformatted{
#'cfg = system_set_option(cfg, 
#'                        group  = "estimation",
#'                        option = "control",
#'                        value  = list(maxiter  = 10000,
#'                                     optimArgs = list(
#'                                       method  = "Nelder-Mead",
#'                                       maxiter = 1000)))
#' }
#' 
#' To alter initial guesses see: \code{\link{system_set_guess}}
#'
#' When performing parameter estimation, the internal function
#' \code{system_od_general} is used. This is the function that simulates your
#' system at the conditions defined for the different cohorts. This is pretty
#' flexible but if you want to go beyond this you can set the
#' \code{observation_function} option:
#'
#' \preformatted{
#'cfg = system_set_option(cfg, 
#'                        group  = "estimation",
#'                        option = "observation_function",
#'                        value  = "my_od")
#' }
#'
#' That will instruct the optimization routines to use the user defined
#' function \code{my_od}. You will need to construct that function to have the
#' same input/output format as \code{\link{system_od_general}}.
#'
#' \bold{\code{group="general"}}
#'
#' \itemize{
#' \item \code{"output_directory"}   = String where analysis outputs will be
#'     placed. Generally you wont want to change this, but it can be useful in Shiny
#'     apps where you need to have each shiny user generate output in that
#'     users directory : \code{file.path(".", "output")}
#' }
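#'
#' For example, to place outputs in a dedicated directory (a minimal sketch; the
#' path shown is illustrative):
#'
#' \preformatted{
#'cfg = system_set_option(cfg, 
#'                        group  = "general",
#'                        option = "output_directory",
#'                        value  = file.path(".", "output", "user_01"))
#' }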
#'
#' \bold{\code{group="logging"}}
#'
#' By default ubiquity prints different information to the console and logs this
#' information to a log file. The following options can be used to control
#' this behavior (an example follows the list):
#'
#' \itemize{
#' \item \code{"enabled"}   = Boolean variable to control logging: \code{TRUE}
#' \item \code{"file"}      = String containing the name of the log file: \code{file.path("transient", "ubiquity_log.txt")}
#' \item \code{"timestamp"} = Boolean switch to control appending a time stamp to log entries: \code{TRUE}
#' \item \code{"ts_str"}    = String format of timestamp: "%Y-%m-%d %H:%M:%S"
#' \item \code{"debug"}     = Boolean switch to control debugging (see below): \code{FALSE}
#' \item \code{"verbose"}   = Boolean switch to control printing to the console \code{FALSE}
#' }
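#'
#' For example, to disable logging entirely (a minimal sketch using the
#' \code{enabled} option above):
#'
#' \preformatted{
#'cfg = system_set_option(cfg, 
#'                        group  = "logging",
#'                        option = "enabled",
#'                        value  = FALSE)
#' }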
#'
#'
#'
#' To enable debugging of different functions (like when performing estimation), 
#' set the \code{debug} option to \code{TRUE}. Important function calls will be 
#' trapped and information will be logged and reported to the console.
#'
#' \preformatted{
#'cfg = system_set_option(cfg, 
#'                        group  = "estimation",
#'                        option = "debug",
#'                        value  = FALSE)
#'}
#'
#' \bold{\code{group="simulation"}}
#'\itemize{
#' \item \code{"include_important_output_times"} - Automatically add bolus, infusion rate switching times, etc: \code{"yes"}(default), \code{"no"}.
#' \item \code{"integrate_with"} - Specify if the ODE solver should use the Rscript (\code{"r-file"}) or compiled C (\code{"c-file"}), if the build process can compile and load the C version it will be the default otherwise it will switch over to the R script.
#' \item \code{"output_times"} - Vector of times to evaulate the simulation (default \code{seq(0,100,1)}).
#' \item \code{"solver"} - Selects the ODE solver: \code{"lsoda"} (default), \code{"lsode"}, \code{"vode"}, etc.; see the documentation for \code{\link[deSolve]{deSolve}} for an exhaustive list.
#' \item \code{"sample_bolus_delta"} - Spacing used when sampling around bolus events (default \code{1e-6}). 
#' \item \code{"sample_forcing_delta"} - Spacing used when sampling around forcing functions (infusion rates, covariates, etc) (default \code{1e-3}). 
#' }
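#'
#' For example, to simulate from 0 to 10 sampling every 0.1 time units (a minimal
#' sketch; time units follow those in your system file):
#'
#' \preformatted{
#'cfg = system_set_option(cfg, 
#'                        group  = "simulation",
#'                        option = "output_times",
#'                        value  = seq(0, 10, 0.1))
#' }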
#'
#' \bold{\code{group="solver"}}
#'
#' Depending on the solver, different options can be set. The documentation
#' for  \code{\link[deSolve]{deSolve}} lists the different solvers. For a full list of options, see the
#' documentation for the specific solver (e.g. \code{?lsoda}). Some common options
#' to consider are:
#' \itemize{
#' \item \code{"atol"} - Relative error tolerance
#' \item \code{"rtol"} - Absolute error tolerance
#' \item \code{"hmin"} - Minimum integration step size
#' \item \code{"hmax"} - Maximum integration step size
#' }
#' To select the \code{vode} solver and set the maximum step size to 0.01, the
#' following would be used:
#' \preformatted{
#'cfg=system_set_option(cfg,
#'                      group  = "simulation",
#'                      option = "solver", 
#'                      value  = "vode")
#'
#'cfg=system_set_option(cfg,
#'                      group  = "solver",
#'                      option = "hmax", 
#'                      value  = 0.01)
#' }
#'
#' \bold{\code{group="stochastic"}}
#'
#' When running stochastic simulations (inter-individual variability applied to system
#' parameters) it can be useful to specify the following:
#' \itemize{
#'  \item\code{"ci"} - Confidence interval (default \code{95})
#'  \item\code{"nsub"} - Number of subjects (default \code{100})
#'  \item\code{"seed"} - Seed for the random numebr generator (default \code{8675309})
#'  \item\code{"ponly"} - Only generate the subject parameters but do not run the simulations (default \code{FALSE})
#'  \item\code{"ssp"} - A list of the calculated static secondary parameters to include (default all parameters defined by \code{<As>})
#'  \item\code{"outputs"} - A list of the predicted outputs to include (default all outputs defined by \code{<O>})
#'  \item\code{"states"} - A list of the predicted states to include(default all states)
#'  \item\code{"sub_file"} - Name of data set loaded with (\code{\link{system_load_data}}) containing subject level parameters and coviariates
#'  \item\code{"sub_file_sample"} - Controls how subjects are sampled from the dataset
#'  }
#'
#' If you wanted to generate \code{1000} subjects but only wanted the parameters, you would
#' use the following:
#' \preformatted{
#'cfg = system_set_option(cfg,
#'                        group  = "stochastic", 
#'                        option = "nsub ",
#'                        value  = 1000)
#'
#'cfg = system_set_option(cfg,
#'                        group  = "stochastic", 
#'                        option = "ponly",
#'                        value  = TRUE )
#' }
#'
#'
#' If you wanted to exclude both states and secondary parameters, while only including 
#' the output \code{Cp_nM}, you would do the following:
#' \preformatted{
#'
#'cfg = system_set_option (cfg, 
#'                         group  = "stochastic",
#'                         option = "ssp",
#'                         value  = list())
#'
#'cfg = system_set_option (cfg, 
#'                         group  = "stochastic",
#'                         option = "states",
#'                         value  = list())
#'
#'cfg = system_set_option (cfg, 
#'                         group  = "stochastic",
#'                         option = "outputs",
#'                         value  = c("Cp_nM")) 
#' }
#'
#' To pull subject information from a data file instead of generating the subject
#' parameters from IIV information the \code{sub_file} option can be used. The value here
#' \code{SUBFILE_NAME} is the name given to a dataset loaded with
#' (\code{\link{system_load_data}}):
#'
#' \preformatted{
#'cfg=system_set_option(cfg, 
#'                      group  = "stochastic",
#'                      option = "sub_file",
#'                      value  = "SUBFILE_NAME")
#' }
#'  
#' Sampling from the dataset can be controlled using the \code{sub_file_sample} option:
#'  
#' \preformatted{
#'cfg=system_set_option(cfg, 
#'                      group  = "stochastic",
#'                      option = "sub_file_sample",
#'                      value  = "with replacement")
#' }
#'  
#' Sampling can be done sequentially (\code{"sequential"}), with replacement
#' (\code{"with replacement"}), or without replacement (\code{"without replacement"})
#'
#' \bold{\code{group="titration"}}
#'
#' \code{"titrate"} - By default titration is disable (set to \code{FALSE}). If you are
#' going to use titration, enable it here by setting this option to \code{TRUE}.
#' This will force #' \code{\link{simulate_subjects}} to use 
#' \code{\link{run_simulation_titrate}} internally when running simulations.
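#'
#' For example (a minimal sketch):
#'
#' \preformatted{
#'cfg = system_set_option(cfg, 
#'                        group  = "titration",
#'                        option = "titrate",
#'                        value  = TRUE)
#' }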
#'
system_set_option <- function(cfg, group, option, value){
 
  groups = c('general', 'solver', 'stochastic', 'simulation', 'estimation', 'logging', 'titration')
  
  errormsgs = c()
  # checking the user input
  isgood = TRUE
  if(group %in% groups){
    #
    # Loading required packages based on options selected 
    #
    if(group == "general" & option == "output_directory"){
      # we're going to make sure the directory exists
      if(!dir.exists(value)){
         if(!dir.create(value)){
           isgood = FALSE
           errormsgs = c(errormsgs, paste("unable to create output_directory >", value,"<", sep=""))
           errormsgs = c(errormsgs, paste("output_directory not set"))
         }
      }

      if(isgood){
        cfg[["options"]][["misc"]][["output_directory"]] = value
      }
    }

    if(group == "simulation" & option == "parallel"){
      if(value == "multicore"){
        if(!system_req("doParallel")){
          isgood = FALSE
          errormsgs = c(errormsgs, "Unable to load the doParallel package")
          errormsgs = c(errormsgs, 'install.packages("doParallel")')
        }
        if(!system_req("foreach")){
          isgood = FALSE
          errormsgs = c(errormsgs, "Unable to load the foreach package")
          errormsgs = c(errormsgs, 'install.packages("foreach")')
        }
        if(!isgood){
          errormsgs = c(errormsgs, "Unable to load one or more packages needed for the  multicore option") }
      }
    }

    if(group == "estimation" & option == "optimizer"){
      if(value == "pso"){
        if(!system_req("pso")){
          isgood = FALSE
          errormsgs = c(errormsgs, "Unable to load the particle swarm optimizer (pso) package")
          errormsgs = c(errormsgs, 'install.packages("pso")')
        }
      }
    }
    if(group == "estimation" & option == "optimizer"){
      if(value == "ga"){
        if(!system_req("GA")){
          isgood = FALSE
          errormsgs = c(errormsgs, "Unable to load the Genetic Algoriths (GA) package")
          errormsgs = c(errormsgs, 'install.packages("GA")')
        }
      }
    }

    if(group == "estimation" & option == "observation_function"){
      if(!exists(value, mode="function")){
        isgood = FALSE
        errormsgs = c(errormsgs, "Unable to set the observation_function")
        errormsgs = c(errormsgs, paste0('The user defined function >', value, '< ', "was not found."))
        errormsgs = c(errormsgs, paste0("You must create this object before setting this option."))
      }
    }

    if(isgood){
      # setting stochastic options
      if(group == 'stochastic'){
        if((option == "states") | (option == "outputs")){
          for(val in value){
            if(!(val %in% names(cfg$options$mi[[option]]))){
              errormsgs = c(errormsgs, paste(option, " >", val, "< not found", sep=""))
              isgood = FALSE
            }
          } 
        }
        if((option == "ssp")){
          for(val in value){
            if(!(val %in% names(cfg[["options"]][["ssp"]]))){
              errormsgs = c(errormsgs, paste("static secondary parameter (ssp) >", val, "< not found", sep=""))
              isgood = FALSE
            }
          } 
        }

        # Making sure the specified dataset is loaded
        if(option == "sub_file"){
          # If value is NULL then we're disabling the sub_file
          if(!is.null(value)){
            # if it's not null we want to check if the dataset has been
            # defined
            if(!(value %in% names(cfg$data))){
              errormsgs = c(errormsgs, paste("Error: dataset >", value, "< not found, please load first", sep=""))
              errormsgs = c(errormsgs, "using system_load_data()")
              isgood = FALSE
            }
          }
        }
        if(option == "sub_file_sample"){
          if(!any(value == c("with replacement", "sequential", "without replacement"))){
            errormsgs = c(errormsgs,  paste("The value", toString(value), "is invalid and must be one of the following"))
            errormsgs = c(errormsgs,        "  sequential          - sample from data file sequentially")
            errormsgs = c(errormsgs,        "  with replacement    - sample from data file with replacement")
            errormsgs = c(errormsgs,        "  without replacement - sample from data file with out replacement")
            isgood = FALSE
          }
        }


        if(isgood){
          cfg$options$stochastic[[option]] = value
        }else{
           errormsgs = c(errormsgs, paste("The value specified for option >", option, "< is not valid, see the messages above", sep=""))
        }
        
        
        }
      
      # setting simulation options
      if(group == "simulation"){
        cfg$options$simulation_options[[option]] = value}


      # titration options
      if(group == 'titration'){
        if(option == "titrate"){
          if(is.logical(value)){
            cfg$titration$titrate = value
          } else {
            errormsgs = c(errormsgs, "The titrate option should be TRUE or FALSE")
            isgood = FALSE
          }
        }
      }
        
      # setting solver options
      if(group == 'solver'){
        cfg$options$simulation_options$solver_opts[[option]] = value}

      # setting logging options
      if(group == 'logging'){
        cfg$options$logging[[option]] = value}
      

      # setting estimation options
      if(group == 'estimation'){
        cfg$estimation$options[[option]] = value}
    }
    
  } else {
    # flagging a bad group
    isgood = FALSE
    errormsgs = c(errormsgs, paste("The specified group >", group,"< is invalid", sep=""))
    errormsgs = c(errormsgs, "Valid groups are:")
    for(valid in groups){
      errormsgs = c(errormsgs, paste("   ->", valid))}
  }
  
  
  # If the error flag has been switched above, then we print some information for the user
  if(!isgood){
    vp(cfg, "ubiquity::system_set_option()                 ", "h1") 
    vp(cfg, "Something went wrong and the option ") 
    vp(cfg, "was not set:")
    vp(cfg, errormsgs)
    }
  
return(cfg)}

#'@export
#'@title Titration Rules
#'@description Defines a new titration rule and the times when that rule is evaluated
#'
#'@param cfg ubiquity system object    
#'@param name name for the titration rule
#'@param times list of times when the rule will be evaluated 
#'@param timescale time scale associated with the titration times (as defined by \code{<TS:?>})
#'
#'@return Ubiquity system object with the titration rule created
#'
#'@details
#' \preformatted{
#'cfg = system_new_tt_rule(cfg,
#'                         name      = "rname",
#'                         times     = c(0, 2, 4),
#'                         timescale = "weeks")
#' }
#' A titration rule identifies a set of times (\code{times}) and an associated time
#' scale (\code{timescale}) in which titration events can potentially occur. Any
#' times scale, as defined in the system file with \code{<TS:?>}, can be used in
#' place of "weeks" above. The \code{name}, \code{"rname"} above, is used to link the
#' titration rule to different conditions discussed below. The name should be
#' a string beginning with a letter, and it can contain any combination of
#' numbers, letters, and underscores. With the rule created we can then add conditions to that rule.
#'
#'@seealso \code{\link{system_set_tt_cond}}, \code{\link{run_simulation_titrate}}
system_new_tt_rule <- function(cfg, name, times, timescale){

  isgood = TRUE
  # empty list holding the new titration information

  errormsgs = c()

  if(!timescale %in% names(cfg$options$time_scales)){
    isgood = FALSE
    errormsgs = c(errormsgs, paste("The timescale: >", timescale, "< was not defined", sep=""))
  }

  # If the timescale checks out we create the titration rule
  if(isgood){
    titrate = list()
    # storing the times and timescale
    titrate$times     = times
    titrate$timescale = timescale
    # converting those times to simtimes
    titrate$simtimes  = system_ts_to_simtime(cfg, times, timescale)

    # Storing the titration information in cfg
    cfg$titration$rules[[name]] = titrate
  }

  if(!isgood){
    vp(cfg, "ubiquity::system_new_tt_rule()", fmt="h1") 
    vp(cfg, "Something went wrong and the        ") 
    vp(cfg, "titration rule was not set          ") 
    vp(cfg, errormsgs) 
    }
return(cfg)
}


#'@export
#'@title Define Titration Triggers and Actions
#'@description Once a rule has been defined using
#' \code{\link{system_new_tt_rule}}, it can then be used by specifying checks at
#' each of the titration time points that, when true, will perform some actions. 
#'
#'@param cfg ubiquity system object    
#'@param name string containing the name for the titration rule to which this condition applies
#'@param cond string that evaluates a boolean value that is \code{TRUE} when the action should be triggered
#'@param action string that evaluates to what should be done when the condition is met (e.g. changing the dose, state change, etc) 
#'@param value code to be stored in the titration history to track when this condition has been triggered
#'
#'@return Ubiquity system object with the titration condition defined
#'
#'
#'@details
#'
#' The general syntax for setting a new condition is:
#'
#' \preformatted{
#'cfg = system_new_tt_cond(cfg,
#'                         name   = "rname",
#'                         cond   = "BOOLEAN EXPRESSION",
#'                         action = "EXPRESSION",
#'                         value  = "VALUE")
#'}
#'
#' The \code{name}
#' input will associate this condition with a previously defined rule. For each
#' time defined when the rule was created, the condition (\code{cond}) will be
#' evaluated. If that condition evaluates as \code{TRUE} then the \code{action} will be
#' evaluated. Lastly, when a condition action is evaluated, the \code{value} is stored
#' in the titration history.
#'
#' Multiple conditions can be associated with a rule. The internal titration
#' history will track each one where a condition has been evaluated as true, but
#' the simulation output will only show the \bold{last} condition to be evaluated as
#' true.
#'
#' The \code{cond} field is a string that, when evaluated, will produce a boolean value
#' (\code{TRUE} or \code{FALSE}). If you simply want to force an action at each of the times
#' for a given rule you can use: \code{cond = "TRUE"}. Alternatively you can provide
#' mathematical expressions or even complicated user defined functions.
#'
#' The \code{action} field is evaluated when \code{cond} is true. To modify how a simulation
#' is going to be performed, you will want to modify the \code{SIMINT_cfgtt}
#' variable using the different system commands. Certain common tasks have
#' prototype functions created to make it easier for the user:
#' \itemize{
#' \item \code{SI_TT_BOLUS} - Set bolus dosing
#' \item \code{SI_TT_RATE} - Set infusion inputs
#' \item \code{SI_TT_STATE} - Reset system states
#' }
#'
#' \bold{Note:} Prototype functions are strings but sometimes it is necessary to
#' specify strings within this string. For the main string use double quotes (")
#' and for the internal strings use single quotes (')
#'
#' \bold{\code{SI_TT_BOLUS}}
#'
#' The simplest way to apply a bolus when the condition is true is to use the following:
#'
#' \preformatted{
#'action = "SI_TT_BOLUS[state=’At’, 
#'                      values=c(10, 10, 10), 
#'                      times=c(0, 1, 2)]"
#' }
#'
#' The \code{values} and \code{times} are vectors of numbers of equal length. The dosing and
#' time units are those specified in the \code{system.txt} file for the \code{<B:?>} delimiter. The
#' times are relative to the titration time. So \code{0} above means at the titration time.
#'
#' It’s possible to specify an interval and a number of times to repeat the last dose
#' using the following:
#'
#' \preformatted{
#'action = "SI_TT_BOLUS[state    = ’At’, 
#'                      values   = c(5, 5, 10), 
#'                      times    = c(0, 2, 4), 
#'                      repdose  = 'last', 
#'                      number   = 7, 
#'                      interval = 4]"
#' }
#'
#' This will give a dose of \code{5} at the titration point and \code{2} time units later. The dose of \code{10}
#' at time \code{4} will be repeated \code{7} times every \code{4} time units. So a total of 8 (\code{7 + 1}) doses
#' at \code{10} will be administered. Remember the time units were those defined in \code{<B:?>}.
#' The input \code{repdose} can be either \code{'last'} or \code{'none'}.
#'
#' \bold{Note:} The main string is in double quotes \code{" "} but the strings in the prototype
#' argument (e.g. \code{'last'}) are in single quotes \code{' '}.
#'
#' \bold{\code{SI_TT_RATE}} 
#'
#' Suppose you created an infusion named \code{Dinf} using \code{<R:?>} and the infusion units
#' are min (times) and mg/min (rates). To give a 60 minute infusion of 20
#' mg/min you would do the following:
#'
#' \preformatted{
#'action = "SI_TT_RATE[rate=’Dinf’, times=c(0, 60), levels=c(20.0, 0)]"
#' }
#'
#' If we wanted to do this every day for 9 more days (a total of 10 days) we can repeat
#' the sequence:
#'
#' \preformatted{
#'action = "SI_TT_RATE[rate     = ’Dinf’, 
#'                     times    = c(0, 60), 
#'                     levels   = c(20, 0), 
#'                     repdose  = 'sequence', 
#'                     number   = 9, 
#'                     interval = 24*60]"
#' }
#'
#' The input \code{repdose} can be either \code{'sequence'} or \code{'none'}.
#'
#' \bold{Note:} The time units and dosing rate are those specified using \code{<R:?>}.
#'
#' \bold{\code{SI_TT_STATE}} 
#'
#' To provide fine control over states at titration points the state reset
#' prototype is provided. For example, suppose you are modeling an assay with
#' a wash step where you want to drop a concentration to zero. If you
#' have a state named \code{Cc} defined in your \code{system.txt} and you want to set
#' it to \code{0.0} in a condition, the following action would work:
#'
#' \preformatted{
#'action = "SI_TT_STATE[Cc][0.0]"
#' }
#'
#' The value here is a number but you can use any mathematical
#' combination of variables available in the titration environment. Also you
#' can create your own user function and place the function call within the
#' brackets above.
#'
#' \bold{Titration Environment}
#'
#' The \code{cond}, \code{action}, and \code{value} statements can use any variables available in
#' the titration environment. If you want to perform complicated actions, you can
#' simply create a user defined function and pass it the variables from the
#' titration environment that you need (see the sketch below). These include named variables from the
#' model as well as internal variables used to control the titration.
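#'
#' For example (a sketch; the function name \code{my_check} and the threshold are
#' illustrative), a user defined function can be referenced directly in a condition:
#'
#' \preformatted{
#'# returns TRUE when the central concentration has dropped below 1
#'my_check = function(Cc){
#'  Cc < 1.0
#'}
#'
#'cfg = system_set_tt_cond(cfg,
#'                         name   = "rname",
#'                         cond   = "my_check(Cc)",
#'                         action = "SI_TT_STATE[Cc][0.0]",
#'                         value  = "1")
#' }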
#'
#' \bold{States and Parameters}
#'
#' System parameters (\code{<P>}), static secondary parameters (\code{<As>}) and 
#' the initial value of covariates are available. Also the state values 
#' (at the current titration time) can be used. These are all available as 
#' the names specified in the \code{system.txt} file. Since system resets
#' (\code{SI_TT_STATE}) are processed first, any changes made to states are 
#' the values that are active for other actions.
#'
#' \bold{Internal Simulation Variables}
#'
#' Internal variables are used to control titration activities. These variables can also be used in the conditions and actions.
#'
#' \itemize{
#'   \item \code{SIMINT_p} - list of system parameters
#'   \item \code{SIMINT_cfg} - system configuration sent into the titration routine
#'   \item \code{SIMINT_cfgtt}- system configuration at the current titration event time
#'   \item \code{SIMINT_ttimes} - vector of titration times (in simulation units)
#'   \item \code{SIMINT_ttime} - current titration time  (in simulation units)
#'   \item \code{SIMINT_tt_ts} - list of time scales for the current titration
#'   \item \code{SIMINT_history} - data frame tracking the history of conditions that evaluated true with the following structure:
#'   \item \itemize{
#'         \item \code{tname} - name of titration rule
#'         \item \code{value} - value indicating condition that was satisfied
#'         \item \code{simtime} - simulation time when that rule/value were triggered
#'         \item \code{timescale} -  time at the rule timescale when that rule/value were triggered
#' }
#' }
#'
#' \bold{Individual Simulations}
#'
#' To run an individual titration simulation use the following:
#'
#' \preformatted{
#'som = run_simulation_titrate(parameters, cfg)
#' }
#'
#'  This provides the same output as \code{\link{run_simulation_ubiquity}} with
#'  two extra fields. The first, \code{som$titration}, contains three columns for each
#'  titration rule. The columns will have a length equal and corresponding to the
#'  simulation times. If the rule name is rname, then the column headers will have
#'  the following names and meanings:
#' \itemize{
#'   \item \code{tt.rname.value} - Value of the rule for the active condition or -1 if not triggered
#'   \item \code{tt.rname.simtime} - Simulation time where the last condition became active
#'   \item \code{tt.rname.timescale} - Simulation time in the time scale the rule was specified in
#' }
#'
#'  The second field is \code{som$titration_history} which contains a summary list of all of the titration events that were triggered.
#' \itemize{
#'    \item \code{tname} - Titration rule name
#'    \item \code{value} - Value of the rule for the active condition or -1 if not triggered   
#'    \item \code{simtime} - Simulation time where the last condition became active
#'    \item \code{timescale} - Simulation time in the time scale the rule was specified in
#' }
#' 
#' To convert this structured list into a data frame the \code{\link{som_to_df}} command can be used:
#' 
#' \preformatted{
#'sdf = som_to_df(cfg, som)
#' }
#'
#' To run stochastic titration simulations, the same function is used:
#'
#' \preformatted{
#'som = simulate_subjects(parameters, cfg)
#' }
#'
#' This will add a list element called \code{som$titration} with three
#' fields for each titration rule:
#'
#' \itemize{
#'   \item \code{tt.rname.value} - Value of the rule for the active condition or -1 if not triggered
#'   \item \code{tt.rname.simtime} - Simulation time where the last condition became active
#'   \item \code{tt.rname.timescale} - Simulation time in the time scale the rule was specified in
#' } 
#'
#' Each of these fields is a matrix with an entry for each simulation time
#' (column) and each subject (row). This data structure can also be converted to
#' a data frame using \code{som_to_df}.
#' 
#'
#'@seealso \code{\link{system_new_tt_rule}}, \code{\link{run_simulation_titrate}},  \code{\link{som_to_df}}, \code{\link{simulate_subjects}} 
system_set_tt_cond <- function(cfg, name, cond, action, value='-1'){

  isgood = TRUE

  errormsgs = c()

  if(!(name %in% names(cfg$titration$rules))){
    errormsgs = c(errormsgs, paste( "The rule >", name, "< was not found, first create the rule using system_new_tt_rule then add conditions", sep=""))
    isgood = FALSE
  }


  action_parsed = action
  value_parsed = value

  # creating an empty condition
  if(isgood){
    tc = list()
    tc$cond          = cond
    tc$action        = action
    tc$value         = value

    
    # parsing the action
    action_parsed = parse_patterns(cfg, action)


    tc$action_parsed = action_parsed
    tc$value_parsed  = value_parsed
    # adding the condition to the list of conditions for the current rule
    if(is.null(cfg$titration$rules[[name]]$conditions)){
      cname = 'c1'
    } else {
      cname = sprintf('c%d', (length(names(cfg$titration$rules[[name]]$conditions))+1)) }
    cfg$titration$rules[[name]]$conditions[[cname]] = c(tc)

  }

  if(!isgood){
    vp(cfg, "ubiquity::system_set_tt_cond()",  fmt="h1") 
    vp(cfg, "Something went wrong and the        ") 
    vp(cfg, "titration condition was not set     ") 
    vp(cfg, errormsgs) 
    }


return(cfg)
}

#'@export
#'@title Parse String for Prototype Functions
#'@keywords internal
#'@description A string can contain any number of prototype functions, and this function will find them and replace them with the actual R code.
#'
#'@param cfg ubiquity system object    
#'@param str string
#'
#'@return String with the prototype functions replaced
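#'
#'@details For example (a sketch based on the \code{SI_TT_STATE} pattern), a string
#' containing \code{SI_TT_STATE[Cc][0.0]} is rewritten into the corresponding R
#' assignments:
#'
#'\preformatted{
#'newstr = parse_patterns(cfg, 'SI_TT_STATE[Cc][0.0]')
#'# newstr: 'Cc = 0.0; SIMINT_IC[["Cc"]] = 0.0'
#' }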
parse_patterns  <- function(cfg, str){

  patterns = list()

  # newstr will have the string with the substitutions
  newstr = str

  # List of the possible patterns
  patterns$bolus$pattern = 'SI_TT_BOLUS['
  patterns$bolus$replace = 'SIMINT_cfgtt = system_set_tt_bolus(cfg=SIMINT_cfgtt, SIMINT_ARG_1,  tt_ts=SIMINT_tt_ts,  tsinfo=SIMINT_scales)'
  patterns$bolus$narg    = 1;

  patterns$rate$pattern  = 'SI_TT_RATE['
  patterns$rate$replace  = 'SIMINT_cfgtt = system_set_tt_rate(cfg=SIMINT_cfgtt, SIMINT_ARG_1,  tt_ts=SIMINT_tt_ts,  tsinfo=SIMINT_scales)'
  patterns$rate$narg     = 1;

  patterns$state$pattern = 'SI_TT_STATE['
  patterns$state$replace = 'SIMINT_ARG_1 = SIMINT_ARG_2; SIMINT_IC[["SIMINT_ARG_1"]] = SIMINT_ARG_2'
  patterns$state$narg    = 2;


  # We loop through each pattern and see if it's in the string
  # if it's in the string we replace it over and over again 
  # until we get them all
  for(pname in names(patterns)){


    found_pname = FALSE
    found_error = FALSE

    # if we find the pattern for pname in the string
    # we indicate using the found variable and set the 
    # error counter to 1
    if(grepl(patterns[[pname]]$pattern, newstr, fixed=TRUE)){
      error_cntr  = 1
      errormsg    = 'None'
      found_pname = TRUE }
   
   
    while(found_pname){
    
      # attempting to replace the first instance of the pattern
      # storing the parse results in pr
      pr = find_bracketed_arguments(str     = newstr,
                                    pattern = patterns[[pname]]$pattern,
                                    replace = patterns[[pname]]$replace,
                                    narg    = patterns[[pname]]$narg)

      # if the parsing was successful
      # we store the new_string list element in newstr
      if(pr$isgood){
        newstr = pr$new_string
      }
      else{
        errormsg = pr$errormsg
        found_pname = FALSE
        found_error = TRUE 
      }

      
      # if the new string (after successive replacements) no longer has the
      # pattern we stop
      if(!grepl(patterns[[pname]]$pattern, newstr, fixed=TRUE)){
        found_pname = FALSE }
   
      if(error_cntr >= 100){
        found_pname = FALSE
        found_error = TRUE
        errormsg    = 'Exceeded the maximum number of replacements (100), stuck in a loop?'
      
      }
    
     # incrementing the error counter
     error_cntr = error_cntr + 1
    }
   
    if(found_error){
      vp(cfg, 'Error parsing patterns')
      vp(cfg, paste('String:        ', str,                       sep=""))
      vp(cfg, paste('Pattern name:  ', pname,                     sep=""))
      vp(cfg, paste('Pattern:       ', patterns[[pname]]$pattern, sep=""))
      vp(cfg, paste('Error Message: ', errormsg,                  sep=""))
    
    }
  
  }

 return(newstr)
}


#'@export
#'@title Parse Prototype Functions for Arguments
#'@keywords internal
#'@description 
#' Parses strings to find abstract functions (of the format
#' \code{SIFUNC[ARG1][ARG2][ARG3]}), extracts the arguments from that function, and
#' replaces it with actual functions and any additional arguments needed
#'
#'@param str string containing the prototype function call
#'@param pattern string indicating the start of the function, e.g. \code{"SI_TT_BOLUS["}
#'@param replace string to replace \code{pattern} with
#'@param narg number of arguments to prototype function
#'@param op string used to indicate the opening bracket 
#'@param cp string used to indicate the closing bracket 
#'
#'@return string containing the actual function call/code built from the prototype function
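#'
#'@details A sketch of how this internal helper behaves (the \code{replace} string
#' here is simplified relative to the ones used by \code{\link{parse_patterns}}):
#'
#'\preformatted{
#'pr = find_bracketed_arguments(str     = "SI_TT_STATE[Cc][0.0]",
#'                              pattern = "SI_TT_STATE[",
#'                              replace = "SIMINT_ARG_1 = SIMINT_ARG_2",
#'                              narg    = 2)
#'# pr$new_string: "Cc = 0.0"
#' }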
find_bracketed_arguments <- function(str, pattern, replace = '', narg, op = '[', cp=']'){

  # getting the length of the string
  strlen = nchar(str)

  isgood     = TRUE 
  errormsg   =  ''
  new_string = ''
  blank_str  = strrep(' ', strlen)

  # finding the pattern position
  ppos = regexec(pattern, str, fixed=TRUE)


  # checking to see if the pattern is in str
  if(ppos >0){
    pstart    = ppos[[1]][1]
    arg_start = c(attr(ppos[[1]], "match.length")) + pstart -1
    arg_stop  = c()
   
    # counter used to keep track of excess brackets
    excess_p  = 0
   
    procstr = TRUE
    strpos   = arg_start[1] + 1
   
    while(procstr){
   
      # pulling out the current character
      strele = substr(str, strpos, strpos)
   
   
      if((length(arg_start) == length(arg_stop)) & (strele == op)){
        arg_start = c(arg_start, strpos) 
        excess_p = 0
      }
      else if(strele == op){
        excess_p = excess_p + 1
      }
      # if we find a closing parenthesis and
      # excess is zero then we've found an 
      # end to the argument
      else if((strele == cp) & (excess_p == 0)){
        arg_stop  = c(arg_stop, strpos) 
      }
      else if(strele==cp){
        excess_p = excess_p - 1
      } 
   
      # if we get to the end of the string
      # then we stop processing it
      if(strpos >= strlen){
        procstr = FALSE
      }
      # if we found matching braces for the number 
      # of arguments then we stop
      if(length(arg_stop) == narg){
        procstr = FALSE }
   
      # incrementing the string position
      strpos = strpos+1
    } 
   
   
    # Checking to make sure we found the same number of start/stop options
    if(length(arg_start) == length(arg_stop)){

      if(narg == length(arg_start)){
        # extracting arguments from the string
        ext_args = c()
        for(idx in 1:length(arg_start)){
           # SIMINT_ARG_1 SIMINT_ARG_2
           newarg = substr(str, arg_start[idx] + 1, arg_stop[idx] - 1)
           replace = gsub(sprintf('SIMINT_ARG_%d', idx), newarg, replace, fixed=TRUE)
           ext_args = c(ext_args, newarg)
        }
       
        new_string = 
        sprintf('%s%s%s', 
                 substr(str, 1,pstart-1),                           # from the beginning until just before the function starts
                 replace,                                           # new stuff in the middle
                 substr(str, arg_stop[length(arg_stop)]+1, strlen)) # just after the function call ends to the end of the string
      } else{
       isgood = FALSE
       errormsg = sprintf("Number of arguments specified (%d) different from number found (%d)", narg, length(arg_stop))
     }
   }
   else{
     isgood = FALSE
     errormsg = sprintf("Start indicators (%d) different from stop indicators(%d)", length(arg_start), length(arg_stop))
   }
   
   # Creating a blank_string with markers where 
   # the ID'd positions are in the original string
   if(isgood){
     substring(blank_str, pstart, pstart) = 'S'
     for(idx in 1:length(arg_start)){
      substring(blank_str, arg_start[idx], arg_start[idx]) = toString(idx)
      substring(blank_str, arg_stop[idx],  arg_stop[idx])  = toString(idx)
     
     }
   }
  
  } else{
    isgood = FALSE
    errormsg = sprintf("unable to find patter: '%s' in string", pattern)
  }
  
  finfo = list()
  finfo$isgood     = isgood
  finfo$errormsg   = errormsg
  finfo$str        = str         # original string
  finfo$new_string = new_string  # string with replacement 
  finfo$blank_str  = blank_str   # string with position markers


return(finfo)
}


#'@export
#'@title Actual Function Called by \code{SI_TT_BOLUS}
#'@keywords internal
#'@description The prototype function \code{SI_TT_BOLUS} provides an interface to this function. Based on the input from \code{SI_TT_BOLUS}
#' bolus inputs will be updated for the current titration time. 
#' 
#'@param cfg       ubiquity system object    
#'@param state     dosing state/compartment (Defined in \code{<B:events>})
#'@param values    vector of dosing amounts (in dosing units defined by \code{<B:events>})
#'@param times     vector of dosing times relative to the current titration time (in time units defined by \code{<B:times>})
#'@param tt_ts     list of timescale values for the current titration time
#'@param tsinfo    list with timescale information for inputs (bolus, rates, etc)
#'@param repdose   \code{"none"}, \code{"last"}, \code{"all"}
#'@param interval  interval to repeat in the units defined in \code{<B:times>}
#'@param number    number of times to repeat 
#'
#'@return ubiquity system object with the bolus dosing updated.
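#'
#'@details For reference (a sketch), an action such as
#' \code{"SI_TT_BOLUS[state='At', values=c(10), times=c(0)]"} is expanded by
#' \code{\link{parse_patterns}} into a call of roughly this form:
#'
#'\preformatted{
#'SIMINT_cfgtt = system_set_tt_bolus(cfg    = SIMINT_cfgtt,
#'                                   state  = 'At',
#'                                   values = c(10),
#'                                   times  = c(0),
#'                                   tt_ts  = SIMINT_tt_ts,
#'                                   tsinfo = SIMINT_scales)
#' }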
system_set_tt_bolus <- function(cfg, state, values, times, tt_ts,  tsinfo, repdose="none", interval=1, number=0){


offset = tt_ts$time/tsinfo$bolus

if(repdose == "none"){
  bolus_times  = offset+times 
  bolus_values = values
  }
else if(repdose == "last"){
  bolus_times  = offset+c(times, 1:number*interval) 
  bolus_values = c(values, rep(x=values[length(values)], times=number))
  }

cfg = system_set_bolus(cfg    = cfg,
                       state  = state,
                       times  = bolus_times,
                       values = bolus_values)
return(cfg)
}

#'@export
#'@title Actual Function Called by \code{SI_TT_RATE}
#'@description The prototype function \code{SI_TT_RATE} provides an abstract interface to this function. Based on the input from \code{SI_TT_RATE}
#' infusion rate inputs will be updated for the current titration time. 
#' 
#'@param cfg       ubiquity system object    
#'@param rate      name of the infusion rate to update (defined in \code{<R:?>})
#'@param times     vector of switching times relative to the current titration time (in time units defined by \code{<R:?>})
#'@param levels    vector of infusion rates (in dosing units defined by \code{<R:?>})
#'@param tt_ts     list of timescale values for the current titration time
#'@param tsinfo    list with timescale information for inputs (bolus, rates, etc)
#'@param repdose   \code{"none"} or \code{"sequence"}
#'@param interval  interval to repeat in the units defined in \code{<R:?>}
#'@param number    number of times to repeat 
#'
#'@return ubiquity system object with the infusion rates updated.
system_set_tt_rate <- function(cfg, rate, times, levels, tt_ts, tsinfo, repdose="none", interval=1, number=0){


# calculating the offset based on the current titration time
#
#  Titration time (in simulation units)
#  ------------------------------------------------- = titration time in rate units
#     Rate time scale (simulation units/rate units)
#

offset = tt_ts$time/tsinfo$infusion_rates[[rate]]

if(repdose == "sequence"){

  rate_times  = c()
  rate_levels = c()
  start_times = seq(0,number)

  for(tidx in start_times){
     rate_times  = c(rate_times, (times+offset+ interval*tidx))
     rate_levels = c(rate_levels, levels)
    }

  } 
else {
  rate_times  = times + offset
  rate_levels = levels

  }


cfg = system_set_rate(cfg    = cfg,
                      rate   = rate,
                      times  = rate_times,
                      levels = rate_levels)

return(cfg)
}

#'@export
#'@title Set Bolus Inputs
#'@description Defines bolus dosing specified in the system file using  \code{<B:times>} and   \code{<B:events>} 
#'
#'@param cfg ubiquity system object    
#'@param state name of the state to apply the bolus
#'@param times list of injection times 
#'@param values corresponding list of injection values     
#'
#'@return Ubiquity system object with the bolus information set
#'
#'@examples
#' \donttest{
#' # Creating a system file from the mab_pk example
#' fr = system_new(file_name        = "system.txt", 
#'                 system_file      = "mab_pk", 
#'                 overwrite        = TRUE, 
#'                 output_directory = tempdir())
#' 
#' # Building the system 
#' cfg = build_system(system_file  = file.path(tempdir(), "system.txt"),
#'       output_directory          = file.path(tempdir(), "output"),
#'       temporary_directory       = tempdir())
#'
#' # Clearing all inputs
#' cfg = system_zero_inputs(cfg)
#'
#' # SC dose of 200 mg
#' cfg = system_set_bolus(cfg, state   ="At", 
#'                             times   = c(  0.0),  #  day
#'                             values  = c(200.0))  #  mg
#'}
#'@seealso \code{\link{system_zero_inputs}}
system_set_bolus <- function(cfg, state, times, values){
  
  errormsgs = c()

  # checking the user input
  isgood = TRUE
  if(!(length(times) == length(values))){
    errormsgs = c(errormsgs, "The times and values have differnt lengths")
    errormsgs = c(errormsgs, " ")
    isgood = FALSE
    }
  if(!(state %in% names(cfg$options$inputs$bolus$species))){
    errormsgs = c(errormsgs, paste("The state >", state, "< could not be found", sep=""))
    isgood = FALSE
  }
  
  if(isgood){
    bolus_old = cfg$options$inputs$bolus;
    # getting all of the times both previous and those in the 
    # current state being specified
    all_times = unique(sort(c(bolus_old$times$values, times)))
    
    # looping through the species and figuring out which times we need to keep
    all_times_keep = c();
    for(current_time in all_times){
      keep_time = FALSE
      for(species in names(cfg$options$inputs$bolus$species)){
        # if the species is the one being updated then we 
        # look and see if the current time is in the list 
        # of times to be updated
        if(species == state){
          if(!is.na(match(current_time, times))){
            keep_time = TRUE
          }
        # Otherwise this is a different species. So we see if the time
        # is in the bolus_old list. If it is, we see if this species 
        # has a non-zero value
        } else{ 
          if(!is.na(match(current_time, bolus_old$times$values))){
            # pulling out the index in bolus_old that corresponds to this time
            time_index = match(current_time, bolus_old$times$values)
            if(bolus_old$species[[species]]$values[[time_index]] > 0){
              keep_time = TRUE
            }
          }
        }
      }
      # keep_time should be true if there is a value specified in the current
      # state being updated or if there is a non-zero value in the other
      # states. We then add this to all_times_keep:
      if(keep_time == TRUE){
        all_times_keep = c(all_times_keep, current_time)
      }
    }
    
    # 
    # zeroing out the bolus information for the species
    # 
    for(species in names(cfg$options$inputs$bolus$species)){
      cfg$options$inputs$bolus$species[[species]]$values = c()
    }
    
    #
    # Now building the bolus list based on 
    # all_times_keep and the values specified above
    #
    for(current_time in all_times_keep){
      for(species in names(cfg$options$inputs$bolus$species)){
        # default value of dose set to zero
        species_value = 0
        # then we check to see if it's nonzero and overwrite accordingly
        if(species == state){
          if(!is.na(match(current_time, times))){
             time_index = match(current_time, times) 
             species_value = values[time_index]
          }
        }
        else{
          if(!is.na(match(current_time, bolus_old$times$values))){
            time_index = match(current_time, bolus_old$times$values) 
            species_value = bolus_old$species[[species]]$values[[time_index]]
          }
        }
        
        # storing the bolus value for the specific species
        cfg$options$inputs$bolus$species[[species]]$values = 
          c(cfg$options$inputs$bolus$species[[species]]$values, species_value)
      }
    }
    cfg$options$inputs$bolus$times$values = all_times_keep

  } else {
    vp(cfg, sprintf("system_set_bolus()", fmt="h1")) 
    vp(cfg, sprintf("Something went wrong and the bolus  ")) 
    vp(cfg, sprintf("was not set:")) 
    vp(cfg, errormsgs) 
    
    }

return(cfg)}

#'@export
#'@title Set Variability Terms
#'@description Set elements of the current variance covariance matrix
#' specified in the system file with \code{<IIV:?:?> ?}, \code{<IIVCOR:?:?>?}, \code{<IIVSET:?:?> ?}, \code{<IIVCORSET:?:?>?}
#'
#'@param cfg ubiquity system object    
#'@param IIV1 row name of the variance/covariance matrix
#'@param IIV2 column name of the variance/covariance matrix element
#'@param value value to assign to the variance/covariance matrix element
#'
#'@return Ubiquity system object with IIV information set
#'@examples
#' \donttest{
#' # Creating a system file from the mab_pk example
#' fr = system_new(file_name        = "system.txt", 
#'                 system_file      = "mab_pk", 
#'                 overwrite        = TRUE, 
#'                 output_directory = tempdir())
#' 
#' # Building the system 
#' cfg = build_system(system_file  = file.path(tempdir(), "system.txt"),
#'       output_directory          = file.path(tempdir(), "output"),
#'       temporary_directory       = tempdir())
#'
#' # Clearing all inputs
#' cfg = system_zero_inputs(cfg)
#'
#' # Setting the covariance element for CL and Vc to 0.03
#' cfg = system_set_iiv(cfg,
#'                      IIV1 = "ETACL",
#'                      IIV2 = "ETAVc",
#'                      value=0.03)
#'}
#'@seealso \code{\link{system_fetch_iiv}}
system_set_iiv <- function(cfg, IIV1, IIV2, value){
  if("iiv" %in% names(cfg)){
    IIV1_idx = match(c(IIV1), names(cfg$iiv$iivs))
    IIV2_idx = match(c(IIV2), names(cfg$iiv$iivs))
    
    if(is.na(IIV1_idx)){
      vp(cfg, paste("IIV >", IIV1, "< not found", sep="")) 
    }else if(is.na(IIV2_idx)){
      vp(cfg, paste("IIV >", IIV2, "< not found", sep="")) 
    }else{
      cfg$iiv$values[IIV1_idx, IIV2_idx] = value
      cfg$iiv$values[IIV2_idx, IIV1_idx] = value
    }
  } else {
    vp(cfg, "ubiquity::system_set_iiv()", fmt="h1")
    vp(cfg, "No IIV information was found") 
    vp(cfg, "These can be specified using: ") 
    vp(cfg, "<IIV:?>, <IIV:?:?>, and <IIVCOR:?:?> ")
  }
return(cfg)}

#-----------------------------------------------------------
#'@export
#'@title View Information About the System
#'@description Displays information (dosing, simulation options, covariates,
#' etc) about the system.
#'
#'@param cfg ubiquity system object    
#'@param field string indicating the aspect of the system to display
#'@param verbose Boolean variable that when set to true will echo the information to the screen 
#'
#'@return sequence of strings with system information (one line per element)
#'
#' The \code{field} argument can be set to one of the following:
#' \itemize{
#'    \item \code{"all"} will show all information about the system
#'    \item \code{"parameters"} summary of parameter information
#'    \item \code{"bolus"} currently set bolus dosing
#'    \item \code{"rate"} infusion rate dosing 
#'    \item \code{"covariate"} covariates
#'    \item \code{"iiv"} variance/covariance information
#'    \item \code{"datasets"} loaded datasets
#'    \item \code{"simulation"} simulation options
#'    \item \code{"estimation"} estimation options
#'    \item \code{"nca"} non-compartmental analyses that have been performed
#' }
#'@examples
#' # To log and display the current system information:
#' \donttest{
#' # Creating a system file from the mab_pk example
#' fr = system_new(file_name        = "system.txt", 
#'                 system_file      = "mab_pk", 
#'                 overwrite        = TRUE, 
#'                 output_directory = tempdir())
#' 
#' # Building the system 
#' cfg = build_system(system_file  = file.path(tempdir(), "system.txt"),
#'       output_directory          = file.path(tempdir(), "output"),
#'       temporary_directory       = tempdir())
#'
#'   msgs = system_view(cfg, verbose=TRUE)
#' }
system_view <- function(cfg,field="all", verbose=FALSE) {
  
  msgs = c()
  
  # Processing parameter information
  if(field == "all" | field== "parameters"){
      msgs = c(msgs, sprintf(" Parameter Information"))
      msgs = c(msgs, sprintf(" Parameter set selected:"))
      msgs = c(msgs, sprintf("   Short Name:  %s", cfg$parameters$current_set))
      msgs = c(msgs, sprintf("   Description: %s", cfg$parameters$sets[[cfg$parameters$current_set]]$name))
      msgs = c(msgs, sprintf(" Default parameters for current set:"))
      msgs = c(msgs, sprintf("%s |  %s | %s",
                     pad_string('name', 18), 
                     pad_string('value', 12), 
                     pad_string('units', 15)))
      msgs = c(msgs, paste(replicate(52, "-"), collapse = ""))
      for(pidx in 1:length(cfg$parameters$matrix$name)){
      msgs = c(msgs, sprintf("%s |  %s | %s",
                  pad_string(as.character(cfg$parameters$matrix$name[pidx]), 18), 
                  var2string(cfg$parameters$matrix$value[pidx], 12), 
                  pad_string(as.character(cfg$parameters$matrix$units[pidx]), 15)))
      }
      msgs = c(msgs, paste(replicate(52, "-"), collapse = ""))
      msgs = c(msgs, " ")
  }
  
  
  # Processing bolus information
  if(field == "all" | field== "bolus"){
    if("bolus" %in% names(cfg$options$inputs))  {
      msgs = c(msgs, sprintf(" Bolus dosing details "))
      msgs = c(msgs, sprintf("%s |  %s | %s | %s",
                      pad_string("field", 10),
                      pad_string("values", 10),
                      pad_string("scaling", 10),
                      pad_string("units", 10)))
      msgs = c(msgs, paste(replicate(50, "-"), collapse = ""))
      msgs = c(msgs, sprintf("%s |  %s | %s | %s",
                     pad_string("times", 10),
                     pad_string(paste(cfg$options$inputs$bolus$times$values, collapse=" "), 10),
                     pad_string(cfg$options$inputs$bolus$times$scale, 10),
                     pad_string(cfg$options$inputs$bolus$times$units, 10)))
      
      for(species in names(cfg$options$inputs$bolus$species)){
        msgs = c(msgs,  sprintf("%s |  %s | %s | %s",
                   pad_string(species, 10),
                   pad_string(paste(cfg$options$inputs$bolus$species[[species]]$values, collapse=" "), 10),
                   pad_string(cfg$options$inputs$bolus$species[[species]]$scale, 10),
                   pad_string(cfg$options$inputs$bolus$species[[species]]$units, 10)))
      }
      msgs = c(msgs, paste(replicate(50, "-"), collapse = ""))
      
    } else {
      msgs = c(msgs, "No bolus information found") }
  }
  
  # Processing infusion rate information
  if(field == "all" | field== "rate"){
    if("infusion_rates" %in% names(cfg$options$inputs))  {
      msgs = c(msgs, sprintf(" Infusion rate details "))
      msgs = c(msgs, sprintf("%s | %s | %s | %s | %s",
                 pad_string("Rate ", 10),
                 pad_string("field", 10),
                 pad_string("values", 10),
                 pad_string("scaling", 10),
                 pad_string("units", 10)))
      msgs = c(msgs, paste(replicate(65, "-"), collapse = ""))
      for(rate    in cfg$options$inputs$infusion_rate_names){
        msgs =c(msgs, sprintf("%s | %s | %s | %s | %s",
                   pad_string(rate, 10),
                   pad_string('time', 10),
                   pad_string(paste(cfg$options$inputs$infusion_rates[[rate]]$times$values, collapse=" "), 10),
                   pad_string(      cfg$options$inputs$infusion_rates[[rate]]$times$scale, 10),
                   pad_string(      cfg$options$inputs$infusion_rates[[rate]]$times$units, 10)))
        msgs =c(msgs, sprintf("%s | %s | %s | %s | %s",
                   pad_string('', 10),
                   pad_string('levels', 10),
                   pad_string(paste(cfg$options$inputs$infusion_rates[[rate]]$levels$values, collapse=" "), 10),
                   pad_string(      cfg$options$inputs$infusion_rates[[rate]]$levels$scale, 10),
                   pad_string(      cfg$options$inputs$infusion_rates[[rate]]$levels$units, 10)))
      }
      msgs = c(msgs, paste(replicate(65, "-"), collapse = ""))
      msgs = c(msgs, " ")
    } else {
      msgs =c(msgs, "No infusion rate information found") }
  }
  
  # Processing covariate information
  if(field == "all" | field== "covariate"){
    if("covariates" %in% names(cfg$options$inputs))  {
      msgs = c(msgs, sprintf(" Covariate details"))
      msgs = c(msgs, sprintf("%s | %s | %s | %s",
                     pad_string(" Covariate", 10),
                     pad_string("field", 10),
                     pad_string("values", 10),
                     pad_string("units", 10)))
      msgs = c(msgs, paste(replicate(50, "-"), collapse = ""))
        for(covariate in names(cfg$options$inputs$covariates)){
          msgs = c(msgs, sprintf("%s | %s | %s | %s",
                     pad_string(covariate, 10),
                     pad_string('time', 10),
                     pad_string(paste(cfg$options$inputs$covariates[[covariate]]$times$values, collapse=" "), 10),
                     pad_string(      cfg$options$inputs$covariates[[covariate]]$times$units, 10)))
          msgs = c(msgs, sprintf("%s | %s | %s | %s",
                     pad_string(sprintf('(%s)', cfg$options$inputs$covariates[[covariate]]$cv_interp), 10),
                     pad_string('levels', 10),
                     pad_string(paste(cfg$options$inputs$covariates[[covariate]]$values$values, collapse=" "), 10),
                     pad_string(      cfg$options$inputs$covariates[[covariate]]$values$units,  10)))
        }
        msgs = c(msgs, paste(replicate(50, "-"), collapse = ""), " ")
    } else {
      msgs = c(msgs, "No covariate information found", " ")}
  }
  
  # Processing iiv information    
  if(field == "all" | field== "iiv"){
    if("iiv" %in% names(cfg))  {
      msgs = c(msgs, sprintf(" IIV details"))
      msgs = c(msgs, sprintf(" IIV/Parameter set:"))
      msgs = c(msgs, sprintf("   Short Name:  %s ", cfg$iiv$current_set))
      msgs = c(msgs, sprintf(" Variance/covariance matrix"))
      iivs = names(cfg$iiv$iivs)
      # creating the headers
      msgs = c(msgs, " ")
      row_str =  pad_string(" ", 18)
      for(colidx in 1:length(iivs)){
        row_str = sprintf("%s%s", row_str, pad_string(iivs[colidx], 18))}
        msgs = c(msgs, row_str)
      for(rowidx in 1:length(iivs)){
        row_str = sprintf("%s",  pad_string(iivs[rowidx], 18))
        for(colidx in 1:length(iivs)){
          row_str = sprintf("%s%s", row_str, var2string( cfg$iiv$values[rowidx,colidx], 18))
        }
        msgs = c(msgs, row_str)
      }
      msgs = c(msgs, " " )
        
      msgs = c(msgs, sprintf(" On parameters"))
      for(pname in names(cfg$iiv$parameters)){
         msgs = c(msgs, sprintf(" %s, %s(%s)",
                       pad_string(pname,10),
                       pad_string(cfg$iiv$parameters[[pname]]$iiv_name,10),
                       cfg$iiv$parameters[[pname]]$distribution))
      }
      
    } else {
      msgs = c(msgs, "No IIV information found") }
  }

  #
  # Simulation Options
  #
  if(field == "all" | field== "simulation"){
     msgs = c(msgs, sprintf(" ", "Simulation details"))
     if('integrate_with' %in% names(cfg$options$simulation_options)){
       msgs = c(msgs, sprintf(" integrate_with          %s", cfg$options$simulation_options$integrate_with))
     }
     if('output_times' %in% names(cfg$options$simulation_options)){
       msgs = c(msgs, sprintf(" output_times            %s ", var2string_gen(cfg$options$simulation_options$output_times)))
     }
  }
  #
  # Solver Options
  #

  #
  # Stochastic Options
  #


  #
  # Datasets
  #
  if(field == "all" | field== "datasets"){
      if("data" %in% names(cfg))  {
        msgs = c(msgs, " ", " Dataset details")
        for(ds_name   in names(cfg$data)){
          msgs = c(msgs, paste(replicate(20, "-"), collapse = ""))
          msgs = c(msgs, sprintf(" Name:      %s", ds_name))
          msgs = c(msgs, sprintf(" Data File: %s", cfg$data[[ds_name]]$data_file$name))
          if("sheet" %in% names(cfg$data[[ds_name]]$data_file)){
            msgs = c(msgs, sprintf(" Sheet:     %s", cfg$data[[ds_name]]$data_file$sheet))
          }
          msgs = c(msgs, sprintf(" Columns:   %s", paste(colnames(cfg$data[[ds_name]]$values), collapse=", ")))
          msgs = c(msgs, sprintf(" Rows:      %d", nrow(cfg$data[[ds_name]]$values)))
        }
      } else {
       msgs = c(msgs, " No datasets loaded") }
  }


  #
  # Estimation Options
  #
  if(field == "all" | field== "estimation"){
     msgs = c(msgs, " ")
     msgs = c(msgs,         "Estimation details ")
     msgs = c(msgs, sprintf(" Parameter set:          %s",  cfg[["parameters"]][["current_set"]]))
     msgs = c(msgs, sprintf(" Parameters estimated:   %s",  toString(names(cfg[["estimation"]][["mi"]]))))
     msgs = c(msgs, sprintf(" objective_type          %s",  cfg[["estimation"]][["objective_type"]]))
     msgs = c(msgs, sprintf(" observation_function    %s",  cfg[["estimation"]][["options"]][["observation_function"]]))
  }


  #
  # Dataset information
  #
  if(field == "all" | field== "cohorts"){
     if("cohorts" %in% names(cfg))  {
       msgs = c(msgs, " ")
       msgs = c(msgs," Cohort details")
       for(ch_name   in names(cfg$cohorts)){
         msgs = c(msgs,sprintf(" Cohort: %s", ch_name))
         msgs = c(msgs, paste(replicate(20, "-"), collapse = ""))
         msgs = c(msgs,sprintf(" dataset: %s; (%s)", cfg$cohorts[[ch_name]]$dataset, cfg$data[[cfg$cohorts[[ch_name]]$dataset]]$data_file$name))

         # output times
         if("output_times" %in% names(cfg[["cohorts"]][[ch_name]])){
           msgs = c(msgs,sprintf(" Cohort-specific output times (output_times) "))
           msgs = c(msgs, sprintf("     output_times = %s", var2string_gen(cfg[["cohorts"]][[ch_name]][["output_times"]])))
           msgs = c(msgs, "")
         }

         msgs = c(msgs,sprintf(" Cohort options (options) "))

         #options
         if('options' %in% names(cfg$cohorts[[ch_name]])){
           for(opname in names(cfg$cohorts[[ch_name]]$options)){
             msgs = c(msgs, sprintf("     %s = c(%s)", opname, toString(cfg$cohorts[[ch_name]]$options[[opname]])))
           }
         } else{
           msgs = c(msgs, "     none")
         }
         msgs = c(msgs, " ")

         #filter 
         msgs = c(msgs, " Cohort filter (cf)")
         if('cf' %in% names(cfg$cohorts[[ch_name]])){
           for(col_name in names(cfg$cohorts[[ch_name]]$cf)){
             msgs = c(msgs, sprintf("     %s = c(%s)", col_name, toString(cfg$cohorts[[ch_name]]$cf[[col_name]])))
           }
         } else{
           msgs = c(msgs, "     none")
         }
         msgs = c(msgs, " ")

         msgs = c(msgs, " Cohort-specific parameters (cp)")
         if('cp' %in% names(cfg$cohorts[[ch_name]])){
           for(pname in names(cfg$cohorts[[ch_name]]$cp)){
             msgs = c(msgs, sprintf("     %s = %s", pname, toString(cfg$cohorts[[ch_name]]$cp[[pname]])))
           }
         } else{
           msgs = c(msgs, "     none")
         }

         msgs = c(msgs, " ")
         msgs = c(msgs, " Outputs")
         for(oname in names(cfg$cohorts[[ch_name]]$outputs)){

           msgs = c(msgs, sprintf("   >%s<              ", oname))
           msgs = c(msgs, sprintf("    Dataset:         "))
           msgs = c(msgs, sprintf("     Sample Time  %s ", cfg$cohorts[[ch_name]]$outputs[[oname]]$obs$time))
           msgs = c(msgs, sprintf("     Observation  %s ", cfg$cohorts[[ch_name]]$outputs[[oname]]$obs$value))
           if('missing' %in% names(cfg$cohorts[[ch_name]]$outputs[[oname]]$obs)){
               msgs = c(msgs, sprintf("     Missing      %s ", toString(cfg$cohorts[[ch_name]]$outputs[[oname]]$obs$missing)))
           }

           msgs = c(msgs, " ")

           msgs = c(msgs, sprintf("    Model:           "))
           msgs = c(msgs, sprintf("     Timescale    %s ", cfg$cohorts[[ch_name]]$outputs[[oname]]$model$time))
           msgs = c(msgs, sprintf("     Output       %s ", cfg$cohorts[[ch_name]]$outputs[[oname]]$model$value))
           msgs = c(msgs, sprintf("     Variance     %s ", cfg$cohorts[[ch_name]]$outputs[[oname]]$model$variance))
           msgs = c(msgs, sprintf("    ---              "))

         }
         msgs = c(msgs, " ")

       }
     } else {
       msgs = c(msgs, " No cohort information found") }
  }

  #
  # NCA 
  #
  # Processing NCA information
  if(field == "all" | field== "nca"){
    if("nca" %in% names(cfg)){
      for(analysis_name in  names(cfg[["nca"]])){
        nca_tmp = cfg[["nca"]][[analysis_name]]
        NCA_cols = system_fetch_nca_columns(cfg, analysis_name = analysis_name)
        msgs = c(msgs, " ")
        msgs = c(msgs, "NCA Details")
        msgs = c(msgs, paste("  Analysis:                       ", analysis_name))
        msgs = c(msgs, paste("   Options:                       "))
        msgs = c(msgs, paste("      Dose to conc scale          ", nca_tmp[["ana_opts"]][["dscale"]]))
        msgs = c(msgs, paste("      Min NCA points              ", nca_tmp[["ana_opts"]][["NCA_min"]]))
        msgs = c(msgs, paste("      Extrapolate C0              ", nca_tmp[["ana_opts"]][["extrap_C0"]]))
        msgs = c(msgs, paste("      Number of extrap points     ", nca_tmp[["ana_opts"]][["extrap_N"]]))
        msgs = c(msgs, paste("      Sparse                      ", nca_tmp[["ana_opts"]][["sparse"]]))
        msgs = c(msgs, paste("   Dataset (", nca_tmp[["ana_opts"]][["dsname"]], ")"))
        msgs = c(msgs, paste("      NCA Field-->Column in dataset"))
        msgs = c(msgs, paste("      -----------------------------"))
        for(dsfield in names(nca_tmp[["ana_opts"]][["dsmap"]])){
          msgs = c(msgs, paste("      ", dsfield, "-->", nca_tmp[["ana_opts"]][["dsmap"]][[dsfield]], sep=""))
        }
        msgs = c(msgs, paste("   The analysis contains the following columns"))
        msgs = c(msgs, "")
        len_NCA_col     = NCA_cols$len_NCA_col      
        len_label       = NCA_cols$len_label       
        len_from        = NCA_cols$len_from        
        len_description = 40
        nca_res_header  = paste(pad_string("column name", location="end", maxlength=(len_NCA_col        + 2)), "|",
                                pad_string("from",        location="end", maxlength=(len_from           + 2)), "|",
                                pad_string("label",       location="end", maxlength=(len_label          + 2)), "|",
                                pad_string("description", location="end", maxlength=(len_description    + 2))     ,sep="")
     
        row_sep = paste(rep("-", nchar(nca_res_header)), collapse="")

        msgs = c(msgs, paste("     ", row_sep, sep=""))
        msgs = c(msgs, paste("     ", nca_res_header, sep=""))
        msgs = c(msgs, paste("     ", row_sep, sep=""))

        for(ridx in 1:nrow(NCA_cols[["NCA_col_summary"]])){
           col_name       = as.character(NCA_cols[["NCA_col_summary"]][ridx,][["col_name"]])
           from           = as.character(NCA_cols[["NCA_col_summary"]][ridx,][["from"]])
           label          = as.character(NCA_cols[["NCA_col_summary"]][ridx,][["label"]])
           description    = as.character(NCA_cols[["NCA_col_summary"]][ridx,][["description"]])

           nca_res_row    = paste(pad_string(col_name,    location="end", maxlength=(len_NCA_col        + 2)), "|",
                                  pad_string(from,        location="end", maxlength=(len_from           + 2)), "|",
                                  pad_string(label,       location="end", maxlength=(len_label          + 2)), "|",
                                  pad_string(description, location="end", maxlength=(len_description    + 2))     ,sep="")
           msgs = c(msgs, paste("     ", nca_res_row, sep=""))
        }
        msgs = c(msgs, paste("     ", row_sep, sep=""))
      }
    } else {
      msgs = c(msgs, "No NCA has been performed") 
    }

  }


  
  # Template placeholder for processing additional fields
  if(field == "all" | field== "XXX"){
   #  if("infusion_rates" %in% names(cfg$options$inputs))  {
   #  } else {
   #  }
  }

  # This will print the current results to the screen if 
  # verbose has been selected
  if(verbose){
     vp(cfg, msgs) }
  
return(msgs)}
# /system_view
#-----------------------------------------------------------

#'@export
#'@title Convert R Objects to Strings
#'@description Mechanism for converting R objects to strings for reporting. 
#'@keywords internal
#'
#'@param var R variable
#'
#'@return Variable in string form
#'
#'@examples
#'var2string_gen(c(1,2,3))
var2string_gen <- function(var)  {
if(is.vector(var)){
  mystr = sprintf('min = %s; max = %s; length = %d ', 
  var2string(min(var), maxlength=0, nsig_f=1),
  var2string(max(var), maxlength=0, nsig_f=1), length(var)) 
} else {
  if(is.numeric(var)){
    mystr = var2string(var)
  } else {
    mystr = toString(var)
  }
}
return(mystr)
}


#'@export
#'@title Converts Numeric Variables into Padded Strings
#'@description Mechanism for converting numeric variables into strings for reporting. 
#'
#'@param vars numeric variable or a vector of numeric variables
#'@param maxlength if this value is greater than zero spaces will be added to the beginning of the string until the total length is equal to maxlength
#'@param nsig_e number of significant figures for scientific notation
#'@param nsig_f number of significant figures for numbers (2.123)
#'
#'@return Number as a string padded
#'
#'@examples
#'var2string(pi, nsig_f=20)
#'var2string(.0001121, nsig_e=2, maxlength=10)
var2string <- function(vars,maxlength=0, nsig_e = 3, nsig_f = 4) {
#  str = var2string(var, 12) 
#  converts the numerical value 'var' to a padded string 12 characters wide

strs = c()

for(var in vars){
  if(is.character(var)){
    str = var
  } else if(is.na(var)){
    str = "NA"
  } else if(is.nan(var)){
    str = "NaN"
  } else if(var == 0){
   str = '0' 
  }else if((var < .01 )| (var > 999)){
    #str = sprintf('%.3e', var )
    eval(parse(text=sprintf("str = sprintf('%%.%de', var )",nsig_e)))
  }
  else{
    #str = sprintf('%.4f', var )}
     eval(parse(text=sprintf("str = sprintf('%%.%df', var )",nsig_f)))
    }
  
  str = pad_string(str, maxlength)

  strs = c(strs, str)
}



return(strs)}


#'@export
#'@title Pad String with Spaces
#'@description Adds spaces to the beginning or end of a string until it reaches maxlength. Used for aligning text.
#'
#'@param str string
#'@param maxlength length to pad to
#'@param location either \code{"beginning"} to pad the left or \code{"end"} to pad the right
#'
#'@return Padded string
#'@examples
#'pad_string("bob", maxlength=10)
#'pad_string("bob", maxlength=10, location="end")
pad_string <-function(str, maxlength=1, location='beginning'){
#  str = pad_string(str, maxlength)
#
#  adds spaces to the beginning of the string 'str' until it is length
#  'maxlength'

  
  if(nchar(str)<maxlength)  {
    # calculating the number of spaces to add
    pad_length = maxlength-nchar(str) 
    # appending the spaces to the beginning of str
    if(location == "beginning"){
      str = sprintf('%s%s', paste(replicate(pad_length, " "), collapse = ""),str)
    }
    else{
      str = sprintf('%s%s', str, paste(replicate(pad_length, " "), collapse = ""))
    }
  }
return(str)}



#'@export
#'@title Run Population Simulations 
#'@description  Used to run Population/Monte Carlo simulations with subjects
#' generated from either provided variance/covariance information or a dataset. 
#' 
#'@param parameters list containing the typical value of parameters
#'@param cfg ubiquity system object    
#'@param progress_message text string to prepend when called from the ShinyApp
#'@param show_progress Boolean value controlling the display of a progress indicator (\code{TRUE})
#'
#'@return Mapped simulation output with individual predictions, individual
#' parameters, and summary statistics of the parameters. The vignettes below
#' provide details on the format of the output. 
#'
#'@details 
#'
#' Failures due to numerical instability or other integration errors will be
#' captured within the function. Data for those subjects will be removed from the
#' output. Their IDs will be displayed as messages and stored in the output. 
#'
#'
#' For more information on setting options for population simulation see the
#' stochastic section of the \code{\link{system_set_option}} help file.
#'
#'
#'@seealso Vignettes on simulation (\code{vignette("Simulation", package = "ubiquity")}) and titration (\code{vignette("Titration", package = "ubiquity")}) as well as \code{\link{som_to_df}}
simulate_subjects = function (parameters, cfg, show_progress = TRUE, progress_message = "Simulating Subjects:"){
#function [predictions] = simulate_subjects(parameters, cfg)
#
# Inputs:
#
# cfg - System configuration variable generated in the following manner:
#
# cfg = build_system()
# cfg = system_select_set(cfg, 'default')
#
# parameters - list of typical parameter values. This can be obtained from
# the cfg variable:
#
# parameters = system_fetch_parameters(cfg)
#
# cfg$options$stochastic
# list with the following fields:
#
#   nsub 
#      number of subjects to simulate  (default 100)
#
#   seed  
#      seed for random number generator (default 8675309)
#
#   ci    
#      desired confidence interval (e.g. 95)
#
#   ponly 
#      generate only the parameters and do not perform the simulation
#      TRUE, or FALSE (default)
#
#   sub_file 
#      name of the data structure loaded with system_load_data
#
#
# These values can then be modified as necessary.
#
# Output:
#
# The predictions data structure contains the following:
#
# predictions$tcsummary
#   This is a data frame that summarizes the predictions with the following
#   fields:
#     ts.TIMESCALE
#     s.STATE.X
#     o.OUTPUT.X
#
#   Where TIMESCALE, STATE, and OUTPUT refer to the named timescales, states,
#   and outputs. X can be either the mean, median, lb_ci or ub_ci (the latter
#   two represent the lower and upper bounds of the confidence interval).
#
#
# predictions$subjects 
#   Contains the parameters and secondary parameters, one row for each subject  
#
# predictions$times
#   A field for every timescale containing the sample times from the
#   simulation.
#
# predictions$states and predictions$outputs -
#   There is a field for each state or output which contains a profile for
#   each subject (one per column) and each row corresponds to the sampling
#   times in predictions$times

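# A minimal usage sketch (kept as comments; it assumes a system built from a
# system.txt file with IIV defined -- the option values shown are illustrative):
#
#   cfg        = build_system()
#   cfg        = system_select_set(cfg, 'default')
#   parameters = system_fetch_parameters(cfg)
#
#   # Adjusting the stochastic options described above
#   cfg$options$stochastic$nsub = 200
#   cfg$options$stochastic$seed = 8675309
#   cfg$options$stochastic$ci   = 95
#
#   som = simulate_subjects(parameters, cfg)
#
#   # som$tcsummary then contains the mean, median and confidence
#   # interval timecourses for each state and output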

# List to hold the outputs
p = list(subjects = list(parameters           = NULL,
                         secondary_parameters = NULL),
         tcsummary = NULL,
         states    = NULL,
         outputs   = NULL,
         times     = NULL)

# defining the default values
nsub              = 100
seed              = 8675309
ci                = 95
ponly             = FALSE
sub_file          = NULL
sub_file_sample   = 'with replacement'
sub_file_ID_col   = 'SIMINT_ID'
sub_file_TIME_col = 'SIMINT_TIME'
# Used to map IDs from the sub_file to 
# Subject IDs
sub_file_ID_map   = data.frame(file_ID = c(),
                               sub_ID  = c())

state_names  = names(cfg$options$mi$states)
output_names = names(cfg$options$mi$outputs)
ssp_names    = names(cfg$options$ssp)

if("stochastic" %in% names(cfg$options)){
# Parsing stochastic options
  if("nsub" %in% names(cfg$options$stochastic)){
    nsub = cfg$options$stochastic$nsub
  }
  
  if("seed" %in% names(cfg$options$stochastic)){
    seed = cfg$options$stochastic$seed
  } 
  
  if("ci" %in% names(cfg$options$stochastic)){
    ci   = cfg$options$stochastic$ci
  } 
  
  if("sub_file" %in% names(cfg$options$stochastic)){
    sub_file   = cfg$options$stochastic$sub_file
  } 

  if("sub_file_sample" %in% names(cfg$options$stochastic)){
    sub_file_sample   = cfg$options$stochastic$sub_file_sample
  } 

  if("ponly" %in% names(cfg$options$stochastic)){
    ponly = cfg$options$stochastic$ponly
  } 

  if("states" %in% names(cfg$options$stochastic)){
    state_names = cfg$options$stochastic$states
  } 

  if("ssp" %in% names(cfg$options$stochastic)){
    ssp_names = cfg$options$stochastic$ssp
  } 


  if("outputs" %in% names(cfg$options$stochastic)){
    output_names = cfg$options$stochastic$outputs
    # By default all outputs will include those with and without residual error.
    # If the user specifies outputs manually, then we also add in the output
    # with error if it has been defined in the system file.
    output_names_specified = output_names;
    for(output_name in output_names_specified){
      if(output_name %in% names(cfg$ve)){
        output_names = c(output_names, sprintf('SIOE_%s', output_name))
      }
    }
  } 

  # Defining the columns to keep from the simulation
  ts_names     = names(cfg$options$time_scales) 
  ts_names     = ts_names[ts_names != "time"] 
  state_names  = unlist(state_names)
  output_names = unlist(output_names)
  ssp_names    = unlist(ssp_names)


  col_keep = c("time",
               state_names,
               output_names,
               ssp_names, 
               paste("ts.", ts_names, sep=""))

}



isgood = TRUE;

if("iiv" %in% names(cfg) | !is.null(sub_file)){

  # If the subjects file is null we check the IIV matrix
  if(is.null(sub_file)){
    # otherwise we check the IIV
    if(min((eigen((cfg$iiv$values + t(cfg$iiv$values))/2))$values) <= 0){
      vp(cfg, "simulate_subjects()")
      vp(cfg, "Warning: The variance/covariance matrix is not   ")
      vp(cfg, "positive semi-definite. Testing only the diagonal")
      vp(cfg, "elements. I.e. no covariance/interaction terms   ")
    
      cfg$iiv$values = diag(diag(cfg$iiv$values))
      if(min((eigen((cfg$iiv$values + t(cfg$iiv$values))/2))$values) <= 0){
        vp(cfg, "Failed using only diagonal/variance elements.")
        vp(cfg, "Check the specified IIV elements in")
        vp(cfg, "cfg$iiv$values")
        isgood = FALSE 
      } else {
        vp(cfg, "Using only the diagonal elements seems to    ")
        vp(cfg, "have worked. Understand that the results do  ")
        vp(cfg, "not include any interaction.                 ")
      }
      vp(cfg, " ")
    }
  }
  else{
  
      # Summarizing information about the data file
      sub_file_dataset        = cfg[["data"]][[sub_file]][["values"]]
      sub_file_nrow           = nrow(sub_file_dataset)
      sub_file_nsub           = length(unique(sub_file_dataset[[sub_file_ID_col]]))
      sub_file_file_name      = cfg[["data"]][[sub_file]][["data_file"]][["name"]]

      # Parameter information
      sub_file_p_found        =       intersect(names(parameters), names(sub_file_dataset))
      sub_file_p_missing      =       setdiff(names(parameters), names(sub_file_dataset))
      if(length(sub_file_p_found) > 0){
        sub_file_p_found_str  = paste(intersect(names(parameters), names(sub_file_dataset)), collapse=', ') }
      else{
        sub_file_p_found_str  =  "None" }
      if(length(sub_file_p_missing) > 0){
        sub_file_p_missing_str= paste(setdiff(names(parameters), names(sub_file_dataset)), collapse=', ')}
      else{
        sub_file_p_missing_str  =  "None" }

      # Covariate information
      sub_file_cov_all = names(cfg$options$inputs$covariates)
      if(length(sub_file_cov_all) > 0){
        # Covariate details
        sub_file_cov_found      = intersect(sub_file_cov_all, names(sub_file_dataset))
        sub_file_cov_missing    =   setdiff(sub_file_cov_all, names(sub_file_dataset))
        if(length(sub_file_cov_found) > 0){
          sub_file_cov_found_str  = paste(sub_file_cov_found, collapse=', ') }
        else{
          sub_file_cov_found_str  =  "None" }
        if(length(sub_file_cov_missing) > 0){
          sub_file_cov_missing_str  = paste(sub_file_cov_missing, collapse=', ') }
        else{
          sub_file_cov_missing_str  =  "None" }
        }
      else {
        # No covariates
        sub_file_cov_found      = c()
        sub_file_cov_missing    = c()
        sub_file_cov_found_str  = "" 
        sub_file_cov_missing_str= "" 
      }

     # Checking to make sure that the required rows exist:
     if(!(sub_file_ID_col %in% names(sub_file_dataset))){
       vp(cfg, paste("Error: The required column >", sub_file_ID_col, "< in the specified dataset >", sub_file, "< is missing", sep="")) 
       vp(cfg, "This column assigns the subject ID to the row.")
       isgood = FALSE
     }
     if(!(sub_file_TIME_col %in% names(sub_file_dataset))){
       vp(cfg, paste("Error: The required column >", sub_file_TIME_col, "< in the specified dataset >", sub_file, "< is missing", sep="")) 
       vp(cfg, "This column associates the system time with the record and should have the same units as the system time.")
       isgood = FALSE
     }


     # Checking to make sure there is at least one subject:
     if(!(sub_file_nrow >0)){
       vp(cfg, paste("Error: The specified dataset >", sub_file, "< contains no data", sep="")) 
       isgood = FALSE
     } else {
       if(isgood){
         if((nsub > sub_file_nsub & sub_file_sample == "without replacement")){
            vp(cfg, " ")
            vp(cfg, "simulate_subjects()")
            vp(cfg, sprintf("Warning: The number of subjects requested (%d) is greater than", nsub))
            vp(cfg, sprintf("the number in the subjects dataset (%d) so it is not", sub_file_nsub))
            vp(cfg, sprintf("possible to sample without replacement. Changing sampling"))
            vp(cfg, sprintf("method to 'with replacement'"))
            vp(cfg, " ")
            sub_file_sample = "with replacement"
         }
       }
     }
  }
  
  # Set the random seed
  set.seed(seed)

  if(isgood){
      vp(cfg, sprintf("Simulating multiple subjects (%d)", nsub), fmt="h1")
      vp(cfg, sprintf("Integrating with:            %s ",  cfg$options$simulation_options$integrate_with))
      vp(cfg, sprintf("Parallel set to:             %s ",  cfg$options$simulation_options$parallel))
      vp(cfg, sprintf("Number of cores:             %d ",  cfg$options$simulation_options$compute_cores))
      if(!is.null(sub_file)){                            
      vp(cfg, sprintf("Subjects source:             %s ", sub_file_file_name))
      vp(cfg, sprintf("   Parameters from file:     %s ", sub_file_p_found_str))
      vp(cfg, sprintf("   Default parameters used:  %s ", sub_file_p_missing_str))
      vp(cfg, sprintf("   Subjects in file:         %d ", sub_file_nsub))
      vp(cfg, sprintf("   Sampling:                 %s ", sub_file_sample))
        if(length(sub_file_cov_all) > 0){
        vp(cfg, sprintf("   Covariates from file:     %s ", sub_file_cov_found_str))
        vp(cfg, sprintf("   Default covariates used:  %s ", sub_file_cov_missing_str))
        }
        else{
        vp(cfg, "   Covariates:               None specified in system ")
        }
      }
    
    
    vp(cfg, "Generating the parameters for subjects.")
    vp(cfg, "Be patient, there may be a delay...")
    
    
    # generating the parameters for each of the subjects
    sub_idx = 1;
    
    # If the subject file is null then we generate the subjects using 
    # the specified IIV
    if(is.null(sub_file)){
      while((sub_idx <= nsub) & isgood) {
        subject = generate_subject(parameters,  cfg);
        parameters_subject = subject$parameters;
        if(sub_idx == 1){
          p$subjects$parameters           = parameters_subject
        } else{
          p$subjects$parameters           = rbind(p$subjects$parameters,          parameters_subject)}
      
        sub_idx = sub_idx + 1;
      }
    }
    else{
      # Sampling the subject IDs from the sub_file based on the methodology
      # specified by the user
      if(sub_file_sample == "sequential"){
        file_IDs = rep_len(sub_file_dataset[[sub_file_ID_col]], nsub) 
        }
      else if(sub_file_sample == "with replacement"){
        file_IDs = sample(sub_file_dataset[[sub_file_ID_col]], 
                         size    =  nsub,
                         replace =TRUE) 
        }
      else if(sub_file_sample == "without replacement"){
        file_IDs = sample(sub_file_dataset[[sub_file_ID_col]], 
                         size    =  nsub,
                         replace =FALSE)
        }
    
      for(sub_idx in 1:length(file_IDs)){
         # Creating the subject parameters with the default values
         parameters_subject = parameters
      
         # Now we overwrite those parameters specified in the dataset
         tmp_sub_records = sub_file_dataset[sub_file_dataset[[sub_file_ID_col]] == file_IDs[sub_idx], ]
         parameters_subject[,sub_file_p_found] = tmp_sub_records[1,sub_file_p_found]
      
         # Storing the subject in the data frame with the other subjects
         if(sub_idx == 1){
           p$subjects$parameters           = parameters_subject
         } else{
           p$subjects$parameters           = rbind(p$subjects$parameters,          parameters_subject)}
      
      
       # Storing the map between the ID in the file and the sampled subject id
       sub_file_ID_map   = rbind(sub_file_ID_map, 
                                 data.frame(file_ID = file_IDs[sub_idx],
                                            sub_ID  = sub_idx))
      
      
      }
    }
    
    # Running simulations
    if(!ponly){
      vp(cfg, "Now running the simulations")
      # Initializing the progress bar
      # If we're running as a script we display this in the console
      # otherwise we initialize a shiny object
      if(show_progress){
        if(cfg$options$misc$operating_environment == 'script'){
          #pb = txtProgressBar(min=0, max=1, width=12, style=3, char='.') 
          # JMH for parallel
          cli::cli_progress_bar(total=100) 
          }
      }
    
      if(cfg$options$misc$operating_environment == 'gui'){
        pb <- shiny::Progress$new()
        # JMH how to parallelize 
        pb$set(message = progress_message, value = 0)
      }
    
      foreach_packages = c("deSolve", "dplyr")
    
      if(cfg$options$misc$distribution == "package"){
        foreach_packages = c(foreach_packages, "ubiquity")
      }
    
      if("multicore" == cfg$options$simulation_options$parallel){
        #
        # Running simulations in parallel
        #
    
        # Setting up and starting the cluster
        # for doSnow
        # cl <- makeSOCKcluster(cfg$options$simulation_options$compute_cores)
        # registerDoSNOW(cl)
        # snow_opts = list(progress = myprogress)
        # for doParallel
        cl <- makeCluster(cfg$options$simulation_options$compute_cores)
        doParallel::registerDoParallel(cl)
        snow_opts = list()
        
        somall <- foreach(sub_idx=1:nsub,
                          .verbose = FALSE,
                          .errorhandling='pass',
                       #  .options.snow=list(progress = myprogress),
                          .packages=foreach_packages) %dopar% {


          # Setting the seed based on the subject ID and the 
          # user specified seed: this applies at the subject level 
          set.seed(cfg$options$stochastic$seed + sub_idx)
    
          # If we're using the c-file we tell the spawned instances to load
          # the library 
          if(cfg$options$simulation_options$integrate_with == "c-file"){
            dyn.load(file.path(cfg$options$misc$temp_directory, paste( cfg$options$misc$c_libfile_base, .Platform$dynlib.ext, sep = "")))
          }
    
          # If we're running a stand alone distribution we load the functions
          if(cfg$options$misc$distribution == "stand alone"){
            source(file.path("library","r_general","ubiquity.R"))}
    
          # now we load the system specific functions
          source(file.path(cfg$options$misc$temp_directory, "auto_rcomponents.R"))
        
          # Pulling out subject level parameters
          parameters_subject = p$subjects$parameters[sub_idx,]
    
          # storing the cfg for the subject
          cfg_sub = cfg
    
          # If we're reading from a file and covariates were specified
          # then we have to apply those on a per subject basis
          if(!is.null(sub_file)){
            if(length(sub_file_cov_found) > 0){
              cfg_sub = 
              apply_sub_file_COV(tmpcfg       = cfg_sub, 
                                 cov_found    = sub_file_cov_found, 
                                 sub_dataset  = sub_file_dataset,
                                 sub_ID_col   = sub_file_ID_col,
                                 sub_TIME_col = sub_file_TIME_col,
                                 file_ID      = sub_file_ID_map[sub_file_ID_map$sub_ID == sub_idx,]$file_ID)
            }
          }
        
          # Running either titration or normal simulation
          if(cfg$titration$titrate){
            tcres = tryCatch(
              { 
               exec.time = system.time((som = run_simulation_titrate(parameters_subject, cfg_sub)))
               list(exec.time = exec.time, som=som, msg="success")},
             error = function(e) {
               list(exec.time = NULL, som=NULL, error=e, msg="error")})
            }
          else{
            tcres = tryCatch(
              { 
               exec.time = system.time((som = run_simulation_ubiquity(parameters_subject, cfg_sub)))
               list(exec.time = exec.time, som=som, msg="success")},
             error = function(e) {
               list(exec.time = NULL, som=NULL, error=e, msg="error")})
            }


          som       = tcres$som
          msg       = tcres$msg

          #tmp = run_simulation_ubiquity(parameters_subject, cfg_sub)

          if(msg== "error"){
          # Checking for integration failure
            som$simout      = NULL
            som$skip_reason = "Integration failure"
          } else if(any(is.nan(as.matrix((som$simout))))){
          # checking to see if any of the results returned NAN
            som$simout      = NULL
            som$skip_reason = "NAN values in simulation output"
          }
        
          # Storing the subject id
          som$sub_idx   = sub_idx
        
          # saving the execution time
          som$exec.time = tcres$exec.time

          # storing the result of trycatch error
          som$error     = tcres$error
        
          # if(cfg$options$misc$operating_environment == 'gui'){
          #   pb$inc(1/nsub, detail = sprintf('%d/%d (%d %%)', sub_idx, nsub, floor(100*sub_idx/nsub))) }
          # Only keep the simout columns the user wants 
          if(!is.null(som$simout)){
            som$simout  = dplyr::select(som$simout , all_of(col_keep))
          }
        
          som }
    
        #
        # Stopping the cluster
        #
        stopCluster(cl)
    
      }
      else{
        system_req("foreach")
        #
        # Running simulations sequentially 
        #
        somall <- foreach(sub_idx=1:nsub) %do% {
        
          # Setting the seed based on the subject ID and the 
          # user specified seed: this applies at the subject level 
          set.seed(cfg$options$stochastic$seed + sub_idx)
        
          # Pulling out subject level parameters
          parameters_subject = p$subjects$parameters[sub_idx,]
    
          cfg_sub = cfg
    
          # If we're reading from a file and covariates were specified
          # then we have to apply those on a per subject basis
          if(!is.null(sub_file)){
            if(length(sub_file_cov_found) > 0){
              cfg_sub = 
              apply_sub_file_COV(tmpcfg       = cfg_sub, 
                                 cov_found    = sub_file_cov_found, 
                                 sub_dataset  = sub_file_dataset,
                                 sub_ID_col   = sub_file_ID_col,
                                 sub_TIME_col = sub_file_TIME_col,
                                 file_ID      = sub_file_ID_map[sub_file_ID_map$sub_ID == sub_idx,]$file_ID)
            }
          }
        
          # Running either titration or normal simulation
          if(cfg$titration$titrate){
            tcres = tryCatch(
              { 
               exec.time = system.time((som = run_simulation_titrate(parameters_subject, cfg_sub)))
               list(exec.time = exec.time, som=som, msg="success")},
             error = function(e) {
               list(exec.time = NULL, error=e, som=NULL, msg="error")})
            }
          else{
            tcres = tryCatch(
              { 
               exec.time = system.time((som = run_simulation_ubiquity(parameters_subject, cfg_sub)))
               list(exec.time = exec.time, som=som, msg="success")},
             error = function(e) {
               list(exec.time = NULL, error=e, som=NULL, msg="error")})
            }
          som       = tcres$som
          msg       = tcres$msg
        
          if(msg== "error"){
          # Checking for integration failure
            som$simout      = NULL
            som$skip_reason = "Integration failure"
          } else if(any(is.nan(as.matrix((som$simout))))){
          # checking to see if any of the results returned NAN
            som$simout      = NULL
            som$skip_reason = "NAN values in simulation output"
          }

          # Storing the subject id
          som$sub_idx = sub_idx
        
          # saving the execution time
          som$exec.time = tcres$exec.time

          # storing the result of trycatch error
          som$error  = tcres$error
        
          # Updating progress indicators
          if(show_progress){
            if(cfg$options$misc$operating_environment == 'script'){
              cli::cli_progress_update(set=sub_idx/nsub*100)
            }
           }
        
          if(show_progress){
            if(cfg$options$misc$operating_environment == 'gui'){
              pb$inc(1/nsub, detail = sprintf('%d/%d (%d %%)', sub_idx, nsub, floor(100*sub_idx/nsub))) }
          }

          # Only keep the simout columns the user wants 
          if(!is.null(som$simout)){
            som$simout  = dplyr::select(som$simout , all_of(col_keep))
          }
        
          som }
      }


      # Pulling out the lengths of different things
      ntimes = length(somall[[1]]$simout$time)
      npsec  = length(ssp_names)
    
      # pulling out the first subject to use below:
      som    = somall[[1]]
    
    
      # Initializing states, outputs, and titration matrices 
      for(state_name   in state_names){
        p$states[[state_name]]            = matrix(0, nsub, ntimes) }
      for(output_name   in output_names){
        p$outputs[[output_name]]          = matrix(0, nsub, ntimes) }
      for(titration_name   in names(som$titration)){
        p$titration[[titration_name]]     = matrix(0, nsub, ntimes) }
    
      # Initializing the secondary parameters
      # Creating the data frame
      p[["subjects"]][["secondary_parameters"]]  = NULL
      if(npsec > 0){
        p[["subjects"]][["secondary_parameters"]]  = as.data.frame(matrix(0, ncol = npsec, nrow=nsub))
        
        # putting the column names
        colnames( p[["subjects"]][["secondary_parameters"]]) = ssp_names
      }
    
      # And storing the output times/timescales
      p$times    = som$simout["time"]
      # creating the time patch vectors for the different timescales
      for(timescale_name   in names(cfg$options$time_scales)){
       timescale_name = sprintf('ts.%s', timescale_name)
       p$times[[timescale_name]] = c(som$simout[[timescale_name]])
      }

      subs_skipped = NULL
    
      for(som in somall){
        sub_idx = som$sub_idx
      
        # If som$simout is null it needs to be skipped 
        # so we capture that information here:
        if(is.null(som$simout)){
         # Storing the id of the subject being skipped
         subs_skipped = rbind(subs_skipped, 
                   data.frame(id     = sub_idx,
                              reason = som$skip_reason))
        } else {
          # storing the secondary parameters
          if(npsec > 0){
            p$subjects$secondary_parameters[sub_idx,] = som$simout[1,ssp_names]
          }
         
          # Storing the states, outputs and titration information
          for(state_name   in state_names){
            p$states[[state_name]][sub_idx,] = som$simout[[state_name]] }
          
          for(output_name   in output_names){
            p$outputs[[output_name]][sub_idx,] = som$simout[[output_name]] }
         
          for(titration_name   in names(som$titration)){
            p$titration[[titration_name]][sub_idx,] = som$titration[[titration_name]]}
        }
        sub_idx = sub_idx + 1;
      }


      #------------------------------------
      # Processing skipped subjects
      if(!is.null(subs_skipped)){
        # Saving the parameter combinations that caused the problems
        subs_skipped$parameters = p$subjects$parameters[as.numeric(subs_skipped$id),]

        # Removing the rows associated with skipped subjects from the
        # Parameters
        p$subjects$parameters = p$subjects$parameters[-as.numeric(subs_skipped$id), ]
        # Secondary parameters
        p$subjects$secondary_parameters = p$subjects$secondary_parameters[-as.numeric(subs_skipped$id), ]
        # States
        for(state_name   in state_names){
          p$states[[state_name]] = p$states[[state_name]][-as.numeric(subs_skipped$id),]
        }
        # Outputs
        for(output_name   in output_names){
          p$outputs[[output_name]] = p$outputs[[output_name]][-as.numeric(subs_skipped$id),]
        }
        # Titration names
        for(titration_name   in  names(som$titration)){
          p$titration[[titration_name]] = p$titration[[titration_name]][-as.numeric(subs_skipped$id),]
        }

        vp(cfg, "The following subjects were skipped")
        for(sub_idx in subs_skipped$id){
          vp(cfg, paste(" ", sub_idx, subs_skipped[subs_skipped$id == sub_idx, ]$reason))
        }
        vp(cfg, paste("The results will only include", nrow(p$subjects$parameters), "subjects"))
      }
      p$subs_skipped = subs_skipped
      #------------------------------------
    
      # Cleaning up the progress bar objects
      if(show_progress){
        if(cfg$options$misc$operating_environment == 'script'){
          cli::cli_progress_done()
        }
      }
      if(cfg$options$misc$operating_environment == 'gui'){
          pb$close()}
      
    }
    
    
    #
    # summarizing the data into a data frame with means, medians, confidence intervals, etc.
    #
    if(!ponly){
      for(timescale_name   in names(cfg$options$time_scales)){
        if("tcsummary" %in% names(p)){
          eval(parse(text=sprintf('p$tcsummary[["ts.%s"]] = som$simout[["ts.%s"]]', timescale_name, timescale_name))) 
        }else{
          eval(parse(text=sprintf('p$tcsummary = data.frame(ts.%s =  som$simout[["ts.%s"]])', timescale_name, timescale_name))) 
        }
      }
      for(state_name   in names(p$states)){
        mymat = p$states[[state_name]]
        tc = timecourse_stats(mymat,ci)
        eval(parse(text=sprintf('p$tcsummary[["s.%s.lb_ci"]]   = tc$stats$lb_ci',   state_name))) 
        eval(parse(text=sprintf('p$tcsummary[["s.%s.ub_ci"]]   = tc$stats$ub_ci',   state_name))) 
        eval(parse(text=sprintf('p$tcsummary[["s.%s.mean"]]    = tc$stats$mean',    state_name))) 
        eval(parse(text=sprintf('p$tcsummary[["s.%s.median"]]  = tc$stats$median',  state_name))) 
        }
      for(output_name   in names(p$outputs)){
        mymat = p$outputs[[output_name]]
        tc = timecourse_stats(mymat,ci)
        eval(parse(text=sprintf('p$tcsummary[["o.%s.lb_ci"]]   = tc$stats$lb_ci',   output_name))) 
        eval(parse(text=sprintf('p$tcsummary[["o.%s.ub_ci"]]   = tc$stats$ub_ci',   output_name))) 
        eval(parse(text=sprintf('p$tcsummary[["o.%s.mean"]]    = tc$stats$mean',    output_name))) 
        eval(parse(text=sprintf('p$tcsummary[["o.%s.median"]]  = tc$stats$median',  output_name))) 
        }
    }
  }

} else {
  vp(cfg, "Error: Trying to simulate subjects with      ")
  vp(cfg, "   variability, but no variance/covariance   ")
  vp(cfg, "   information or dataset containing         ")
  vp(cfg, "   population information was specified.     ")
  vp(cfg, "                                             ")
  vp(cfg, "                                             ")
  vp(cfg, "   Modify the system.txt file to add the     ")
  vp(cfg, "   IIV information using the following:      ")
  vp(cfg, "    <IIV:?>      ?                           ")
  vp(cfg, "    <IIV:?:?>    ?                           ")
  vp(cfg, "    <IIVCOR:?:?> ?                           ")
  vp(cfg, "                                             ")
  vp(cfg, "   Or load a dataset with subject parameters ")
  vp(cfg, "   and covariates and specify this in the    ")
  vp(cfg, "   stochastic options.                       ")
  isgood = FALSE
}

if(!isgood){
  vp(cfg, "simulate_subjects()")
}
cli::cli_rule()

return(p)
}


#'@export
#'@title Calculate Timecourse Statistics for a Matrix of Responses
#'@keywords internal
#'@description 
#'  Given a matrix (d) of time courses (each row is an individual and each column is
#'  a time point) and a confidence interval (ci) this will calculate the mean,
#'  median, confidence intervals and a vector of values for creating patches.
#'
#'@param d matrix of responses (each row an individual and each column a time point)
#'@param ci confidence interval in percent (e.g., 95)
#'
#'@return List with the following elements:
#'
#' \itemize{
#'   \item \code{stats$ub_ci}  vector of confidence interval upper bound 
#'   \item \code{stats$lb_ci}  vector of confidence interval lower bound 
#'   \item \code{stats$mean}   vector of mean values
#'   \item \code{stats$median} vector of median values
#'   }
timecourse_stats = function (d, ci){

tc = list();

myci = ci/100
dsorted = apply(d, 2, sort)
nsubs   = length(dsorted[,1]) 
lb_idx  = nsubs*(1-myci)/2 + 1;
ub_idx  = nsubs - nsubs*(1-myci)/2;

tc$stats$lb_ci  = apply(rbind(dsorted[floor(lb_idx),],  dsorted[ ceiling(lb_idx),]), 2, mean)
tc$stats$ub_ci  = apply(rbind(dsorted[floor(ub_idx),],  dsorted[ ceiling(ub_idx),]), 2, mean)

tc$stats$mean   = apply(dsorted, 2, mean)
tc$stats$median = apply(dsorted, 2, median)


tc$patch$ci  = c(tc$stats$ub_ci,  rev(tc$stats$lb_ci))

return(tc)

}
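
# Usage sketch (comments only; the matrix below is synthetic):
#
#   # 50 subjects (rows) observed at 10 time points (columns)
#   d  = matrix(rnorm(500, mean = 10), nrow = 50, ncol = 10)
#   tc = timecourse_stats(d, 95)
#
#   # tc$stats$mean, tc$stats$median, tc$stats$lb_ci and tc$stats$ub_ci each
#   # contain one value per time point (column of d)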


#'@export
#'@title Extracts Covariates for a Subject from a Subject Data File
#'@keywords internal
#'@description 
#' This function is used when stochastic simulations are being performed using
#' a data file for the subject level information. If the data file contains
#' covariate information, this function will update the system with each subject's
#' covariates. 
#'
#'@param tmpcfg ubiquity system object    
#'@param cov_found list of covariates found in dataset
#'@param sub_dataset name of dataset with subject parameters
#'@param sub_ID_col name of column in dataset with subject IDs 
#'@param sub_TIME_col name of column in dataset with simulation time
#'@param file_ID subject ID to extract covariates for
#'
#'@return ubiquity system object with the covariates set to those for the current subject
apply_sub_file_COV = function (tmpcfg, cov_found, sub_dataset, sub_ID_col, sub_TIME_col, file_ID){
# This function is used when stochastic simulations are being performed using
# a data file for the subject level information. If the data file contains
# covariate information, this function will update the system with each subject's
# covariates. 

# Pulling all records for the current subject
sub_records = sub_dataset[sub_dataset[[sub_ID_col]] == file_ID,]

# Looping through each covariate and updating the cfg file
for(cov_name in cov_found){
  tmpcfg = system_set_covariate(tmpcfg, cov_name,          
                                        times  = sub_records[[sub_TIME_col]],
                                        values = sub_records[[cov_name]])
}

return(tmpcfg)
}

#'@export
#'@title Generate Subject
#'@keywords internal
#'@description 
#' Generates subject with variability specified using the \code{<IIV:?>} descriptor
#' in the system file
#'
#'@param parameters vector of nominal parameter values
#'@param cfg ubiquity system object    
#'
#'@return List with a field named \code{parameters} containing a sample representing a subject
generate_subject = function (parameters, cfg){
# function [subject] = generate_subject(parameters, cfg)

invisible(system_req("MASS"))

subject = list()
subject$parameters   = parameters;


#
# Generating the subject
#
#iiv_parameter_names = fieldnames(cfg.iiv.parameters);
# creating a temporary vector containing the typical values of all of the
# parameters:
TMP_parameters_all = parameters;

# defining the mean of the IIVs and the covariance matrix
covmatrix = cfg$iiv$values;
muzero    = matrix(0, nrow(covmatrix),1)

# Generating the normal sample:
iiv_sample = MASS::mvrnorm(n = 1, muzero, covmatrix, tol = 1e-6, empirical = FALSE, EISPACK = FALSE);

# now looping through each parameter with inter-individual variability
#names(cfg$iiv$iivs)
#names(cfg$iiv$parameters)
TMP_equation  = NULL
TMP_iiv_value = NULL
TMP_iiv_name  = NULL
for(TMP_parameter_name in names(cfg$iiv$parameters)){

  # getting the typical value of the parameter
  TMP_parameter_value = parameters[TMP_parameter_name];

  # pulling out the distribution and IIV name
  eval(parse(text=paste(sprintf("TMP_equation     = cfg$iiv$parameters$%s$equation",    TMP_parameter_name))))
  eval(parse(text=paste(sprintf("TMP_iiv_name     = cfg$iiv$parameters$%s$iiv_name",    TMP_parameter_name))))

  # pulling out the random IIV value for the current iiv
  eval(parse(text=paste(sprintf("TMP_iiv_value = iiv_sample[cfg$options$mi$iiv$%s]",TMP_iiv_name))))

  TMP_subject_parameter_value = generate_parameter(parameters, cfg, TMP_parameter_value, TMP_iiv_value, TMP_equation);

  # Storing the sample in the vector with all parameters
  subject$parameters[TMP_parameter_name] = TMP_subject_parameter_value
}


return(subject)

}
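
# Usage sketch (comments only; requires a built system with <IIV:?>
# information defined in the system file):
#
#   parameters = system_fetch_parameters(cfg)
#   subject    = generate_subject(parameters, cfg)
#
#   # subject$parameters holds the sampled parameter vector for one subject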

#'@export
#'@title Generates a Parameter Based on \code{<IIV:?>} in the System File
#'@description  Internal function used to generate parameters based on IIV information 
#'@keywords internal
#'
#'@param SIMINT_parameters parameters vector containing the typical values
#'@param SIMINT_cfg ubiquity system object    
#'@param SIMINT_PARAMETER_TV  Typical value of the parameter in question
#'@param SIMINT_IIV_VALUE sample from mvr distribution
#'@param SIMINT_equation equation relating IIV and typical value to the parameter value with variability
#'
#'@return parameter value with the variability applied
generate_parameter = function (SIMINT_parameters, SIMINT_cfg, SIMINT_PARAMETER_TV, SIMINT_IIV_VALUE, SIMINT_equation){
  # Defining the system parameters locally
  for(SIMINT_pname in names(SIMINT_cfg$options$mi$parameters)){
    eval(parse(text=paste(sprintf("%s = SIMINT_parameters[SIMINT_cfg$options$mi$parameters$%s]", SIMINT_pname, SIMINT_pname))))
  }

  # Evaluating the parameter with IIV
  return( eval(parse(text=paste(SIMINT_equation))))
}
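
# Illustration (comments only): the equation applied here is taken from the
# <IIV:?> entries in the system file. For a log-normally distributed parameter
# it would typically look something like
#
#   "SIMINT_PARAMETER_TV*exp(SIMINT_IIV_VALUE)"
#
# so a typical value of 1 and an IIV sample of 0.1 would return exp(0.1),
# roughly 1.105 times the typical value.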


#'@export
#'@title Initialize System Log File
#'@description Initializes the currently specified system log file.
#'@param cfg ubiquity system object    
#'
#'@return ubiquity system object with logging enabled
#'
#'@examples
#' \donttest{
#' # Creating a system file from the mab_pk example
#' fr = system_new(file_name        = "system.txt", 
#'                 system_file      = "mab_pk", 
#'                 overwrite        = TRUE, 
#'                 output_directory = tempdir())
#' 
#' # Building the system 
#' cfg = build_system(system_file  = file.path(tempdir(), "system.txt"),
#'       output_directory          = file.path(tempdir(), "output"),
#'       temporary_directory       = tempdir())
#'
#' # Initializing the log file
#' cfg = system_log_init(cfg)
#'}
system_log_init = function (cfg){
# initializes the log file then enables logging

  file.create(cfg$options$logging$file)
  cfg$options$logging$enabled = TRUE
  system_log_entry(cfg, 'Ubiquity log init - R')

return(cfg)
}

#-------------------------------------------------------------------------
#'@export
#'@title Save variables to files     
#'@description Triggered when debugging is enabled, this function will save
#' the contents of values to the specified file name in the ubiquity temporary
#' directory.
#'@param cfg ubiquity system object    
#'@param file_name name of the save file without the ".RData" extension
#'@param values named list of variables to save
#'
#'@return Boolean variable indicating success 
#'
#'@examples
#' \donttest{
#' # Creating a system file from the mab_pk example
#' fr = system_new(file_name        = "system.txt", 
#'                 system_file      = "mab_pk", 
#'                 overwrite        = TRUE, 
#'                 output_directory = tempdir())
#'
#' # Building the system 
#' cfg = build_system(system_file  = file.path(tempdir(), "system.txt"),
#'       output_directory          = file.path(tempdir(), "output"),
#'       temporary_directory       = tempdir())
#'
#' # enable debugging:
#' cfg=system_set_option(cfg,group = "logging", 
#'                          option = "debug", 
#'                          value  = TRUE)
#'
#' # Saving the cfg variable 
#' system_log_debug_save(cfg, 
#'    file_name = 'my_file',
#'    values = list(cfg=cfg))
#'
#'}
system_log_debug_save = function (cfg, file_name = "my_file", values = NULL){

   isgood = TRUE

   if(cfg$options$logging$debug){
     if(is.null(values)){
       isgood = FALSE
       vp(cfg, "ubiquity::system_log_debug_save()")
       vp(cfg, "values set to NULL")
     } else if(!is.null(values)){
       # file name to hold the debugging information
       fn = file.path(cfg$options$misc$temp_directory, paste(file_name, ".RData", sep=""))
       system_log_entry(cfg, paste("Debugging file:", fn))
       save(values, file=fn)
     }
   }

isgood}
#-------------------------------------------------------------------------
#'@export
#'@title Add Log Entry
#'@description Appends a specified line to the analysis log
#'@keywords internal
#'
#'@param cfg ubiquity system object    
#'@param entry string containing the log entry
#'
#'@return Boolean variable indicating success (\code{TRUE}) or failure (\code{FALSE})
#'
#'@examples
#' \donttest{
#' # Creating a system file from the mab_pk example
#' fr = system_new(file_name        = "system.txt", 
#'                 system_file      = "mab_pk", 
#'                 overwrite        = TRUE, 
#'                 output_directory = tempdir())
#' 
#' # Building the system 
#' cfg = build_system(system_file  = file.path(tempdir(), "system.txt"),
#'       output_directory          = file.path(tempdir(), "output"),
#'       temporary_directory       = tempdir())
#'
#' # Adding a log entry
#' system_log_entry(cfg, "Text of log entry")
#'}
system_log_entry = function(cfg, entry){

isgood = FALSE

# if logging is disabled we don't do anything 
if(cfg$options$logging$enabled ==  TRUE){
  # If the log file doesn't exist we initialize it
  if(!file.exists(cfg$options$logging$file)){
   system_log_init(cfg);
  }
  # If the timestamp is enabled we prepend it to the
  # log message
  if(cfg$options$logging$timestamp == TRUE){
    entry = sprintf('%s %s',  format(Sys.time(), format=cfg$options$logging$ts_str), entry)
  }

  # Now we dump it to the log file:
  write(entry, file=cfg$options$logging$file, append=TRUE)
  isgood = TRUE
  }
isgood}

#'@export
#'@title Print and Log Messages
#'@description  Used to print messages to the screen and the log file.
#'
#'@param cfg ubiquity system object    
#'@param str sequence of strings to print
#'@param fmt string format should be one of the following: \code{"h1"},
#'\code{"h2"}, \code{"h3"}, \code{"verbatim"}, \code{"alert"} (default), \code{"warning"},
#'\code{"danger"}. 
#'
#'@return Boolean variable indicating success (\code{TRUE}) or failure (\code{FALSE})
#'
#'@examples
#' \donttest{
#' # Creating a system file from the mab_pk example
#' fr = system_new(file_name        = "system.txt", 
#'                 system_file      = "mab_pk", 
#'                 overwrite        = TRUE, 
#'                 output_directory = tempdir())
#' 
#' # Building the system 
#' cfg = build_system(system_file  = file.path(tempdir(), "system.txt"),
#'       output_directory          = file.path(tempdir(), "output"),
#'       temporary_directory       = tempdir())
#'
#' # Printing and logging a message
#' vp(cfg, "Message that will be logged")
#'}
vp <- function(cfg, str, fmt="alert"){
# logging string 
system_log_entry(cfg, str)

isgood = FALSE

# printing if verbose is enabled
if('options' %in% names(cfg)){
if('verbose' %in% names(cfg$options$logging)){
if(TRUE == cfg$options$logging$verbose){
  for(line in str){
    if(fmt == "alert"){
      cli::cli_alert(line) }
    if(fmt == "h1"){
      cli::cli_h1(line) }
    if(fmt == "h2"){
      cli::cli_h2(line) }
    if(fmt == "h3"){
      cli::cli_h3(line) }
    if(fmt == "danger"){
      cli::cli_alert_danger(line) }
    if(fmt == "warning"){
      cli::cli_alert_warning(line) }
    if(fmt == "verbatim"){
      cli::cli_verbatim(line) }
  }
  isgood = TRUE
  }}}
isgood}

#'@export
#'@keywords internal
#'@title Wrapper for system_log_entry Used in ShinyApp
#'@description Called from the ShinyApp to add a log entry with "App"
#' prepended to the log entry 
#'
#'@param cfg ubiquity system object    
#'@param text string to print/log
#'
#'@return Boolean variable indicating success (\code{TRUE}) or failure (\code{FALSE})
GUI_log_entry <-function(cfg, text){
 isgood =   system_log_entry(cfg, sprintf("App %s", text))
isgood}

#'@export
#'@keywords internal
#'@title Select Records from NONMEM-ish Data Set
#'@description Retrieves a subset of a NONMEM-ish data set based on a list containing filtering information.
#'
#'@param cfg ubiquity system object    
#'@param values dataframe containing the dataset with column headers
#'@param filter list whose element names are column headers in \code{values}; values within the same header are OR'd and values across headers are AND'd
#'
#'@return subset of dataset 
#'
#'@details
#' If the dataset has the headings \code{ID}, \code{DOSE} and \code{SEX}  and
#' \code{filter} has the following format:
#'
#' \preformatted{
#'filter = list()
#'filter$ID   = c(1:4)
#'filter$DOSE = c(5,10)
#'filter$SEX  = c(1)
#'}
#'
#'It would be translated into the boolean filter:
#'
#'\preformatted{
#'((ID==1) | (ID==2) | (ID==3) | (ID==4)) & ((DOSE == 5) | (DOSE==10)) & (SEX == 1)
#'}
nm_select_records    <- function(cfg, values, filter){

  cols = names(filter) 
  if(length(cols) > 0){

    for(column_name in cols){
      #checking to see if the column exists in the dataset
      if(column_name %in% names(values)){
        # subsetting based on the current filter
        #values = values[values[[column_name]] == filter[[column_name]], ]
        values = values[values[[column_name]] %in% filter[[column_name]], ]
      } 
      else{
        vp(cfg, sprintf(' fieldname: %s not found ignoring this entry', column_name))
      }
    }
  }

  return(values)
}
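
# Usage sketch (comments only; the dataset name and column values are
# illustrative):
#
#   filter      = list()
#   filter$DOSE = c(5, 10)
#   filter$SEX  = c(1)
#
#   # Keep records where (DOSE == 5 | DOSE == 10) & (SEX == 1)
#   recs = nm_select_records(cfg, cfg$data[["pm_data"]]$values, filter)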

#'@export
#'@keywords internal
#'@title Convert Time in Timescale to Simulation Time
#'@description 
#' converts a time specified in a defined timescale (say weeks) to the
#' timescale of the simulation (say hours if the rates are in 1/hr units)
#'
#'@param cfg ubiquity system object    
#'@param tstime numeric time of the timescale
#'@param ts string containing the timescale 
#'
#'@return \code{tstime} in the system timescale units 
system_ts_to_simtime <-function(cfg, tstime, ts){
   simtime = c()
   if(ts %in% names(cfg$options$time_scales)){
     simtime = tstime/cfg$options$time_scales[[ts]]
   }
   else{
    vp(cfg, sprintf('Unable to find timescale %s', ts)) }
    return(simtime)
}
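
# Illustration (comments only): if the system time is in hours and the "weeks"
# timescale was defined with a factor of 1/168 (i.e. weeks per hour), then
#
#   system_ts_to_simtime(cfg, 2, "weeks")
#
# would return 2/(1/168) = 336 hours of simulation time.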

#'@export
#'@title Clear all Cohorts
#'@description Clear previously defined cohorts
#'
#'@param cfg ubiquity system object    
#'
#'@return ubiquity system object with no cohorts defined
system_clear_cohorts  <- function(cfg){
  cfg[["cohorts"]] = c()
return(cfg)}

#'@export
#'@title Define Estimation Cohort
#'@description Define a cohort to include in a parameter estimation
#'
#'@param cfg ubiquity system object    
#'@param cohort list with cohort information 
#'
#'@return ubiquity system object with cohort defined 
#'
#'@details 
#' Each cohort has a name (eg \code{d5mpk}), and the dataset containing the
#' information for this cohort is identified (the name defined in \code{\link{system_load_data}})
#'
#' \preformatted{cohort = list(
#'   name         = "d5mpk",
#'   dataset      = "pm_data",
#'   inputs       = NULL,
#'   outputs      = NULL)}
#'
#' Next if only a portion of the dataset applies to the current cohort, you
#' can define a filter (\code{cf} field). This will be 
#' applied to the dataset to only return values relevant to this cohort. For
#' example, if we only want records where the column \code{DOSE} is 5 (for the 5
#' mpk cohort), we can use the following: 
#'
#' \preformatted{cohort[["cf"]]   = list(DOSE   = c(5))}
#' 
#' If the dataset has the headings \code{ID}, \code{DOSE} and \code{SEX}  and
#' the cohort filter has the following format:
#' 
#' \preformatted{cohort[["cf"]]   = list(ID    = c(1:4),
#'                         DOSE  = c(5,10),
#'                         SEX   = c(1))}
#'
#'It would be translated into the boolean filter:
#'
#'\preformatted{((ID==1) | (ID==2) | (ID==3) | (ID==4)) & ((DOSE == 5) | (DOSE==10)) & (SEX == 1)}
#'
#' Optionally you may want to fix a system parameter to a different value for a
#' given cohort. This can be done using the cohort parameter (\code{cp}) field.
#' For example if you had the body weight defined as a system parameter 
#' (\code{BW}), and you wanted to fix the body weight to 70 for the current
#' cohort you would do the following:
#'
#' \preformatted{cohort[["cp"]]   = list(BW        = c(70))}
#'
#' Note that you can only fix parameters that are not being estimated.
#'
#' By default the underlying simulation output times will be taken from the
#' general \code{output_times} option (see \code{\link{system_set_option}}). However, it may also be 
#' necessary to specify simulation output times for a specific cohort. The
#' \code{output_times} field can be used for this. Simply provide a vector of
#'  output times:
#'
#' \preformatted{cohort[["output_times"]]   = seq(0,100,2)}
#'
#' Next we define the dosing for this cohort. It is only necessary to define
#' those inputs that are non-zero. Suppose the data here were generated from
#' animals given a single 5 mpk IV bolus at time 0. Bolus dosing is defined 
#' in the system file using \code{<B:times>} and \code{<B:events>}. If \code{Cp} is the central
#' compartment, you would pass this information to the cohort in the
#' following manner:
#'
#' \preformatted{cohort[["inputs"]][["bolus"]] = list()
#' cohort[["inputs"]][["bolus"]][["Cp"]] = list(TIME=NULL, AMT=NULL)
#' cohort[["inputs"]][["bolus"]][["Cp"]][["TIME"]] = c( 0) 
#' cohort[["inputs"]][["bolus"]][["Cp"]][["AMT"]]  = c( 5)}
#'  
#' Inputs can also include any infusion rates (\code{infusion_rates}) or
#' covariates (\code{covariates}). Covariates will have the default value
#' specified in the system file unless overwritten here. The units here are
#' the same as those in the system file.
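#'
#' For example, a minimal sketch for an infusion rate (assuming a rate named
#' \code{Dinf} has been defined in the system file) would look like:
#'
#' \preformatted{cohort[["inputs"]][["infusion_rates"]] = list()
#' cohort[["inputs"]][["infusion_rates"]][["Dinf"]] = list(TIME=NULL, AMT=NULL)
#' cohort[["inputs"]][["infusion_rates"]][["Dinf"]][["TIME"]] = c(0)
#' cohort[["inputs"]][["infusion_rates"]][["Dinf"]][["AMT"]]  = c(5)}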
#'  
#' Next we need to map the outputs in the model to the observation data in the
#' dataset. Under the \code{outputs} field there is a field for each output. Here 
#' the field \code{ONAME} can be replaced with something more useful (like 
#' \code{PK}). 
#'
#' \preformatted{cohort[["outputs"]][["ONAME"]] = list()}
#'
#' You may want to filter the dataset further. Say, for example, you
#' have two outputs and the \code{cf} applied above still returns records for
#' both outputs. Here you can use the \code{of} field to apply an "output filter"
#' and restrict the records to those that apply to the current output \code{ONAME}. 
#' \preformatted{cohort[["outputs"]][["ONAME"]][["of"]] = list(
#'        COLNAME          = c(),
#'        COLNAME          = c())}
#' If you do not need further filtering of the data, you can just omit the field.
#'
#' Next you need to identify the columns in the dataset that contain your
#' times and observations. This is found in the \code{obs} field for the 
#' current observation:
#' \preformatted{cohort[["outputs"]][["ONAME"]][["obs"]] = list(
#'          time           = "TIMECOL",
#'          value          = "OBSCOL",
#'          missing        = -1)}
#'
#' The times and observations in the dataset are found in the \code{TIMECOL}
#' and \code{OBSCOL} columns (with the optional missing data value specified by -1). 
#'
#' These observations in the dataset need to be mapped to the appropriate
#' elements of your model defined in the system file. This is done with the
#' \code{model} field:
#'
#' \preformatted{cohort[["outputs"]][["ONAME"]][["model"]] = list(
#'          time           = "TS",       
#'          value          = "MODOUTPUT",
#'          variance       = "PRED^2")}
#'
#' First the system time scale indicated by the \code{TS} placeholder above
#' must be specified. The time scale must correspond to the data found in
#' \code{TIMECOL} above.  Next the model output indicated by the \code{MODOUTPUT}
#' placeholder needs to be specified. This is defined in the system file using
#' \code{<O>} and should correspond to \code{OBSCOL} from the dataset. Lastly the
#' \code{variance} field specifies the variance model. You can use the keyword
#' \code{PRED} (the model predicted output) and any variance parameters. Some
#' examples include:
#'
#' \itemize{
#'   \item \code{variance = "1"} - Least squares
#'   \item \code{variance = "PRED^2"} -  Weighted least squares proportional to the prediction squared
#'   \item \code{variance = "(SLOPE*PRED)^2"}  Maximum likelihood estimation where \code{SLOPE} is defined as a variance parameter (\code{<VP>})
#' }
#'
#' The following controls the plotting aspects associated with this output. The
#' color, shape and line values are the values used by ggplot functions. 
#'
#' \preformatted{cohort[["outputs"]][["ONAME"]][["options"]] = list(
#'         marker_color   = "black",
#'         marker_shape   = 16,
#'         marker_line    = 1 )}
#' 
#' If the cohort has multiple outputs, simply repeat the process above for the
#' additional outputs. The estimation vignette contains examples of this. 
#' 
#' \bold{Note: Output names should be consistent between cohorts so they will be grouped together when plotting results.}
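#'
#' Once the cohort list has been assembled, it is added to the system object
#' with a call of the form (a sketch; \code{cohort} is the list built above):
#'
#' \preformatted{cfg = system_define_cohort(cfg, cohort)}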
#' 
#'@seealso Estimation vignette (\code{vignette("Estimation", package = "ubiquity")})
system_define_cohort <- function(cfg, cohort){
  
 if('options' %in% names(cohort)){
   cohort$options = c() }

 # Default plotting options (marker color, shape, and line) applied to each
 # output and the set of option names that are recognized
 defopts = c()
 defopts[["marker_color"]]   = 'black'           
 defopts[["marker_shape"]]   = 0           
 defopts[["marker_line"]]    = 1
 
 validopts = c('marker_color', 'marker_shape', 'marker_line')

 

 # Default values for control structures
 isgood      = TRUE
 datasetgood = TRUE 
 

 #
 # checking the cohort name
 #
 if('name' %in% names(cohort)){
  if(cohort[["name"]] %in% names(cfg[["cohorts"]])){
    isgood = FALSE
    vp(cfg, sprintf('Error: cohort with name >%s< has already been defined', cohort[["name"]]))
  }
  else{
    name_check = ubiquity_name_check(cohort[["name"]])

    cohort_name = cohort[["name"]]
    # Checking the cohort name
    if(!name_check[["isgood"]]){
      isgood = FALSE
      vp(cfg, sprintf('Error: cohort with name >%s< is invalid', cohort[["name"]]))
      vp(cfg, sprintf('Problems: %s', name_check[["msg"]]))
      }
    }
 }
 else{
   isgood = FALSE 
   vp(cfg, 'Error: cohort name not specified')
   cohort_name = 'no name specified' 
 }

 #
 # checking the dataset details 
 #
 if('dataset' %in% names(cohort)){
   if(cohort$dataset %in% names(cfg$data)){
     # pulling the dataset out to test for fields below
     tmpdataset = cfg$data[[cohort$dataset]]
   }
   else{
     isgood      = FALSE 
     datasetgood = FALSE 
     vp(cfg, sprintf('Error: dataset >%s< not found, please load first', cohort$dataset))
   }
 }
 else{
   isgood      = FALSE 
   datasetgood = FALSE 
   vp(cfg, 'Error: dataset not specified for the cohort')
 }

 #
 # checking cohort-specific parameters
 # 
 if('cp' %in% names(cohort)){
   for(pname in names(cohort$cp)){
     if(!(pname %in% names(cfg$parameters$values))){
       isgood = FALSE
       vp(cfg, sprintf('Error: The parameter >%s< ', pname))
       vp(cfg, sprintf('       is not defined. Check the spelling'))
       vp(cfg, sprintf('       or define the parameter using <P> '))
     }
     else{
       if((pname %in% names(cfg$estimation$mi))){
         isgood = FALSE
         vp(cfg, sprintf('Error: The parameter >%s< ', pname))
         vp(cfg, sprintf('       is selected for estimation. It is ')) 
         vp(cfg, sprintf('       not possible to fix a parameter   ')) 
         vp(cfg, sprintf('       that is being estimated.          ')) 
       }
     }
   }
 }


 #
 # checking cohort-filter columns against the dataset
 # 
 if(datasetgood){
   if('cf' %in% names(cohort)){
     for(cname in names(cohort$cf)){
       if(!(cname %in% names(cfg$data[[cohort$dataset]]$values))){
         isgood = FALSE
         vp(cfg, sprintf('Error: The column >%s< in the cohort filter ', cname)) 
         vp(cfg, sprintf('       was not found in the data set >%s< ', cohort$dataset)) 
       }
     }
   }
   else{
     cohort$cf = c()
     vp(cfg, sprintf('Warning: No cohort filter was specified.')) 
   }
 }


 #
 # checking inputs
 #
 if('inputs' %in% names(cohort)){
   # Bolus Inputs
   if('bolus' %in% names(cohort$inputs)){
     if('bolus' %in% names(cfg$options$inputs)){
       # processing each bolus input
       for(iname in names(cohort$inputs$bolus)){
         if(iname %in% names(cfg$options$inputs$bolus$species)){
           if('AMT'  %in% names(cohort$inputs$bolus[[iname]]) & 
              'TIME' %in% names(cohort$inputs$bolus[[iname]])){
             if(length(cohort$inputs$bolus[[iname]]$AMT) != length(cohort$inputs$bolus[[iname]]$TIME)){
               isgood = FALSE
               vp(cfg, sprintf('Error: For the bolus input >%s< the length of ', iname))
               vp(cfg, sprintf('       the AMT and TIME fields need to be the same'))
             }
           }
           else{
            isgood = FALSE
            vp(cfg, sprintf("Error: The bolus input >%s< needs an 'AMT' and a 'TIME' field", iname))
            vp(cfg, sprintf('       cohort$inputs$bolus$%s$AMT  = c()', iname))
            vp(cfg, sprintf('       cohort$inputs$bolus$%s$TIME = c()', iname))
           }
         }
         else{
          isgood = FALSE  
          vp(cfg, sprintf('Error: The bolus input >%s< has not been defined for this system', iname))
          vp(cfg, sprintf('       <B:times>;  %s  []; scale; units', pad_string('', nchar(iname))))
          vp(cfg, sprintf('       <B:events>; %s; []; scale; units', iname))
         }
       }
     }
     else{
      isgood = FALSE
      vp(cfg, sprintf('Error: A bolus input was specified for this cohort but'))
      vp(cfg, sprintf('       there are no bolus inputs defined in the system.txt file.'))
      vp(cfg, sprintf('       <B:times>;         []; scale; units'))
      vp(cfg, sprintf('       <B:events>; STATE; []; scale; units'))
     
     
     }
   }

   # Infusion rates
   if('infusion_rates' %in% names(cohort$inputs)){
     if('infusion_rates' %in% names(cfg$options$inputs)){
       # processing each infusion rate
       for(iname in names(cohort$inputs$infusion_rates)){
         if(iname %in% names(cfg$options$inputs$infusion_rates)){
           if('AMT'  %in% names(cohort$inputs$infusion_rates[[iname]]) & 
              'TIME' %in% names(cohort$inputs$infusion_rates[[iname]])){
             if(length(cohort$inputs$infusion_rates[[iname]]$AMT) != length(cohort$inputs$infusion_rates[[iname]]$TIME)){
               isgood = FALSE
               vp(cfg, sprintf('Error: For the infusion rate >%s< the length of ', iname))
               vp(cfg, sprintf('       the AMT and TIME fields need to be the same'))
             }
           }
           else{
            isgood = FALSE
            vp(cfg, sprintf("Error: The infusion rate >%s< needs an 'AMT' and a 'TIME' field", iname))
            vp(cfg, sprintf('       cohort$inputs$infusion_rates$%s$AMT  = c()', iname))
            vp(cfg, sprintf('       cohort$inputs$infusion_rates$%s$TIME = c()', iname))
           }
         }
         else{
          isgood = FALSE  
           vp(cfg, sprintf('Error: The infusion rate >%s< has not been defined for this system', iname))
          vp(cfg,