tests/testthat/test-unit.R

# Unit tests for baydem functions. These tests are fast but do not check full
# functionality. Thus, they can be quickly run as code is actively developed.
# The function tests are located in test-function.R. They take much longer to
# run and should primarily be run before pushing updates back to GitHub.

# ------------------------------------------------------------------------------
# (1) Do unit tests for functions that generate simulated data
#
# Coverage: sample_gauss_mix
#           draw_rc_meas_using_date
#           load_calib_curve [indirect]
#           simulate_rc_data
# ------------------------------------------------------------------------------

# (1a) test sample_gauss_mix
N <- 75
th_sim <-
  c(
    pi1 = 0.2,
    pi2 = 0.8,
    mu1 = 775,
    mu2 = 1000,
    sig1 = 35,
    sig2 = 45
  )
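
# For reference only (not used by the tests and not a baydem function): a
# minimal sketch of the target density implied by th_sim, i.e., a
# two-component Gaussian mixture with weights pi1/pi2, means mu1/mu2, and
# standard deviations sig1/sig2.
gauss_mix_density_sketch <- function(t, th) {
  th[["pi1"]] * dnorm(t, th[["mu1"]], th[["sig1"]]) +
    th[["pi2"]] * dnorm(t, th[["mu2"]], th[["sig2"]])
}
# e.g., gauss_mix_density_sketch(seq(600, 1300, by = 5), th_sim)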

expect_error(
  sample <- sample_gauss_mix(N,
                             th_sim),
  NA
)

expect_equal(
  length(sample),
  N
)

tau_min <- 600
tau_max <- 1300
expect_error(
  sample <- sample_gauss_mix(N,
                             th_sim,
                             tau_min,
                             tau_max),
  NA
)

expect_equal(
  length(sample),
  N
)

expect_error(
  sample_gauss_mix(N, 1:15),
  "The maximum number of supported mixture components is 4"
)
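# (A length-15 th vector presumably implies five mixture components -- a
# weight, mean, and standard deviation for each -- which exceeds the supported
# maximum of four, hence the expected error above.)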

# (1b) test draw_rc_meas_using_date
calib_df <- load_calib_curve("intcal20")
# test with is_AD FALSE
expect_error(
  rc_meas <- draw_rc_meas_using_date(sample,
                                     calib_df,
                                     list(type="unif_fm",
                                             min=.0021,
                                             max=.0028),
                                     is_AD=F),
  NA
)

expect_equal(
  names(rc_meas),
  c("phi_m","sig_m","trc_m","sig_trc_m")
)

for(vector_name in names(rc_meas)) {
  expect_equal(
    length(rc_meas[[vector_name]]),
    N
  )
}

expect_error(
  rc_meas <- draw_rc_meas_using_date(sample,
                                     calib_df,
                                     list(type="not_valid",
                                             min=.0021,
                                             max=.0028),
                                     is_AD=F),
  "Unrecognized error type = not_valid"
)

# test with is_AD TRUE
expect_error(
  rc_meas <- draw_rc_meas_using_date(sample,
                                     calib_df,
                                     list(type="unif_fm",
                                             min=.0021,
                                             max=.0028),
                                     is_AD=T),
  NA
)

expect_equal(
  names(rc_meas),
  c("phi_m","sig_m","trc_m","sig_trc_m")
)

for(vector_name in names(rc_meas)) {
  expect_equal(
    length(rc_meas[[vector_name]]),
    N
  )
}

expect_error(
  rc_meas <- draw_rc_meas_using_date(sample,
                                     calib_df,
                                     list(type="not_valid",
                                             min=.0021,
                                             max=.0028),
                                     is_AD=T),
  "Unrecognized error type = not_valid"
)

# (1c) test simulate_rc_data

# Check simulation of a non-truncated Gaussian mixture with no random number
# seed set
sim_spec <- list(model_spec=
                   list(density_type = "gauss_mix",
                        th=th_sim,
                        error_spec=list(type="unif_fm",min=.0021,max=.0028),
                        is_AD=T),
                 N=N,
                 calib_curve="intcal20")

expect_error(
  sim <- simulate_rc_data(sim_spec),
  NA
)

expect_equal(
  names(sim),
  c("sim_spec","data")
)

expect_equal(
  names(sim$data),
  c("dates","rc_meas")
)

expect_equal(
  length(sim$data$dates),
  N
)

# Check simulation of a truncated Gaussian mixture with no random number seed
# set
sim_spec <- list(model_spec=
                   list(density_type = "trunc_gauss_mix",
                        th=c(th_sim,tau_min,tau_max),
                        error_spec=list(type="unif_fm",min=.0021,max=.0028),
                        is_AD=T),
                 N=N,
                 calib_curve="intcal20")

expect_error(
  sim <- simulate_rc_data(sim_spec),
  NA
)

expect_equal(
  names(sim),
  c("sim_spec","data")
)

expect_equal(
  names(sim$data),
  c("dates","rc_meas")
)

expect_equal(
  length(sim$data$dates),
  N
)

# Check simulation of a truncated Gaussian mixture with a random number seed
# set
sim_spec <- list(model_spec=
                   list(density_type = "trunc_gauss_mix",
                        th=c(th_sim,tau_min,tau_max),
                        error_spec=list(type="unif_fm",min=.0021,max=.0028),
                        is_AD=T),
                 N=N,
                 calib_curve="intcal20",
                 seed=1002)

expect_error(
  sim <- simulate_rc_data(sim_spec),
  NA
)

expect_equal(
  names(sim),
  c("sim_spec","data")
)

expect_equal(
  names(sim$data),
  c("dates","rc_meas")
)

expect_equal(
  length(sim$data$dates),
  N
)

# ------------------------------------------------------------------------------
# (2) Do unit tests for functions related to the maximum likelihood fitting
#
# coverage: is_th_reduced_valid
#           calc_trunc_gauss_mix_neg_log_lik
#           fit_trunc_gauss_mix
#           init_trunc_gauss_mix
# ------------------------------------------------------------------------------

# (2a) is_th_reduced_valid
expect_equal(
  is_th_reduced_valid(c(0.5,10,20,2,3)),
  TRUE
)

# Fails for pi
expect_equal(
  is_th_reduced_valid(c(1.5,10,20,2,3)),
  FALSE
)

# Fails for mu
expect_equal(
  is_th_reduced_valid(c(0.5,20,10,2,3)),
  FALSE
)

# Fails for s
expect_equal(
  is_th_reduced_valid(c(0.5,10,20,2,3),s_min=10),
  FALSE
)

# (2b) calc_trunc_gauss_mix_neg_log_lik
tau <- seq(tau_min,tau_max,by=5)
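# The measurement matrix M has one row per radiocarbon measurement and one
# column per calendar grid point in tau (see the dimension checks in
# section (6) below).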
M <- calc_meas_matrix(tau,
                      sim$data$rc_meas$phi_m,
                      sim$data$rc_meas$sig_m,
                      calib_df)
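# th_sim[-1] drops pi1 to give the reduced parameter vector (presumably
# pi2, mu1, mu2, sig1, sig2), matching the five-element input used for
# is_th_reduced_valid in (2a); only one weight is free since the mixture
# weights sum to one.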
expect_error(
  neg_log_lik <- calc_trunc_gauss_mix_neg_log_lik(th_sim[-1],M,tau),
  NA
)

expect_equal(
  is.na(neg_log_lik),
  FALSE
)
expect_equal(
  is.finite(neg_log_lik),
  TRUE
)

expect_equal(
  calc_trunc_gauss_mix_neg_log_lik(th_sim[-1],M,tau,s_min=100),
  Inf
)

# (2c) fit_trunc_gauss_mix
# Check that fit_trunc_gauss_mix does not throw an error (and also that the
# output has the right fields). Do this both without and with multiple cores.
# Also check the behavior and reproducibility when setting the random number
# seed(s) and that r_max can be used.
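# Note: per the error message checked below, input_seed may be NA, a single
# integer, or a vector of integers with length 1 + num_restarts.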

expect_error(
  max_lik_fit <-
    fit_trunc_gauss_mix(
                           2,
                           sim$data$rc_meas$phi_m,
                           sim$data$rc_meas$sig_m,
                           600,
                           1300,
                           1,
                           calib_df,
                           num_restarts=3),
  NA
)

expect_equal(
  names(max_lik_fit),
  c("th","neg_log_lik","tau","f","bic","aic","hjkb_fit_list",
    "input_seed","base_seed","seed_vect")
)

expect_error(
  max_lik_fit_a <-
    fit_trunc_gauss_mix(
                           2,
                           sim$data$rc_meas$phi_m,
                           sim$data$rc_meas$sig_m,
                           600,
                           1300,
                           1,
                           calib_df,
                           num_restarts=3,
                           input_seed=10),
  NA
)

expect_error(
  max_lik_fit_b <-
    fit_trunc_gauss_mix(
                           2,
                           sim$data$rc_meas$phi_m,
                           sim$data$rc_meas$sig_m,
                           600,
                           1300,
                           1,
                           calib_df,
                           num_restarts=3,
                           input_seed=10),
  NA
)

expect_equal(
  max_lik_fit_a,
  max_lik_fit_b
)

expect_error(
  max_lik_fit_a <-
    fit_trunc_gauss_mix(
                           2,
                           sim$data$rc_meas$phi_m,
                           sim$data$rc_meas$sig_m,
                           600,
                           1300,
                           1,
                           calib_df,
                           num_restarts=3,
                           input_seed=10:13),
  NA
)

expect_error(
  max_lik_fit_b <-
    fit_trunc_gauss_mix(
                           2,
                           sim$data$rc_meas$phi_m,
                           sim$data$rc_meas$sig_m,
                           600,
                           1300,
                           1,
                           calib_df,
                           num_restarts=3,
                           input_seed=10:13),
  NA
)

expect_equal(
  max_lik_fit_a,
  max_lik_fit_b
)

expect_error(
  max_lik_fit <-
    fit_trunc_gauss_mix(
                        2,
                        sim$data$rc_meas$phi_m,
                        sim$data$rc_meas$sig_m,
                        600,
                        1300,
                        1,
                        calib_df,
                        num_restarts=3,
                        input_seed=10:12),
  paste0("input_seed must be NA, a single integer, or a vector of ",
         "integers with length 1 + num_restarts"),
  fixed = TRUE
)

# Multiple cores
expect_error(
  max_lik_fit <-
    fit_trunc_gauss_mix(2,
                        sim$data$rc_meas$phi_m,
                        sim$data$rc_meas$sig_m,
                        600,
                        1300,
                        1,
                        calib_df,
                        num_restarts=3,
                        num_cores=2),
  NA
)

expect_equal(
  names(max_lik_fit),
  c("th","neg_log_lik","tau","f","bic","aic","hjkb_fit_list",
    "input_seed","base_seed","seed_vect")
)

expect_error(
  max_lik_fit_a <-
    fit_trunc_gauss_mix(
                           2,
                           sim$data$rc_meas$phi_m,
                           sim$data$rc_meas$sig_m,
                           600,
                           1300,
                           1,
                           calib_df,
                           num_restarts=3,
                           num_cores=2,
                           input_seed=10),
  NA
)

expect_error(
  max_lik_fit_b <-
    fit_trunc_gauss_mix(
                           2,
                           sim$data$rc_meas$phi_m,
                           sim$data$rc_meas$sig_m,
                           600,
                           1300,
                           1,
                           calib_df,
                           num_restarts=3,
                           num_cores=2,
                           input_seed=10),
  NA
)

expect_equal(
  max_lik_fit_a,
  max_lik_fit_b
)

expect_error(
  max_lik_fit_a <-
    fit_trunc_gauss_mix(
                           2,
                           sim$data$rc_meas$phi_m,
                           sim$data$rc_meas$sig_m,
                           600,
                           1300,
                           1,
                           calib_df,
                           num_restarts=3,
                           num_cores=2,
                           input_seed=10:13),
  NA
)

expect_error(
  max_lik_fit_b <-
    fit_trunc_gauss_mix(
                           2,
                           sim$data$rc_meas$phi_m,
                           sim$data$rc_meas$sig_m,
                           600,
                           1300,
                           1,
                           calib_df,
                           num_restarts=3,
                           num_cores=2,
                           input_seed=10:13),
  NA
)

expect_equal(
  max_lik_fit_a,
  max_lik_fit_b
)

expect_error(
  max_lik_fit <-
    fit_trunc_gauss_mix(
                        2,
                        sim$data$rc_meas$phi_m,
                        sim$data$rc_meas$sig_m,
                        600,
                        1300,
                        1,
                        calib_df,
                        num_restarts=3,
                        num_cores=2,
                        input_seed=10:12),
  paste0("input_seed must be NA, a single integer, or a vector of ",
         "integers with length 1 + num_restarts"),
  fixed = TRUE
)

# Check that r_max can be used (the resulting fit is not checked for
# correctness, but it should differ from the fit obtained without r_max)
expect_error(
  max_lik_fit_c <-
    fit_trunc_gauss_mix(
                           2,
                           sim$data$rc_meas$phi_m,
                           sim$data$rc_meas$sig_m,
                           600,
                           1300,
                           1,
                           calib_df,
                           r_max=.001,
                           num_restarts=3,
                           num_cores=2,
                           input_seed=10:13),
  NA
)

expect_equal(
  all(max_lik_fit_b$th == max_lik_fit_c$th),
  FALSE
)

# (2d) init_trunc_gauss_mix

# Case:
# num_draws    1
# s_min        Not set
# input_seed   Not set
expect_error(
  th0 <- init_trunc_gauss_mix(2,
                              1,
                              600,
                              1300),
  NA
)

expect_equal(
  length(th0),
  6
)

expect_equal(
  any(is.na(th0)),
  FALSE
)

# Case:
# num_draws    1
# s_min        Set
# input_seed   Set
expect_error(
  th0 <- init_trunc_gauss_mix(2,
                              1,
                              600,
                              1300,
                              s_min=600,
                              input_seed=20),
  NA
)

expect_equal(
  length(th0),
  6
)

expect_equal(
  any(is.na(th0)),
  FALSE
)

expect_equal(
  all(th0[5:6] >= 600),
  TRUE
)

expect_equal(
  all(th0[5:6] <= 700),
  TRUE
)

# Case: [same as previous to check reproducibility]
# num_draws    1
# s_min        Set
# input_seed   Set
expect_error(
  th0_repeat <- init_trunc_gauss_mix(2,
                                     1,
                                     600,
                                     1300,
                                     s_min=600,
                                     input_seed=20),
  NA
)

expect_equal(
  th0,
  th0_repeat
)

# Case:
# num_draws    5
# s_min        Not set
# input_seed   Not set
expect_error(
  TH0 <- init_trunc_gauss_mix(2,
                              5,
                              600,
                              1300),
  NA
)

expect_equal(
  dim(TH0),
  c(6,5)
)

expect_equal(
  any(is.na(TH0)),
  FALSE
)

# Case:
# num_draws    5
# s_min        Set
# input_seed   Set
expect_error(
  TH0 <- init_trunc_gauss_mix(2,
                              5,
                              600,
                              1300,
                              s_min=600,
                              input_seed=30),
  NA
)

expect_equal(
  dim(TH0),
  c(6,5)
)

expect_equal(
  any(is.na(TH0)),
  FALSE
)

expect_equal(
  all(TH0[5:6,] >= 600),
  TRUE
)

expect_equal(
  all(TH0[5:6,] <= 700),
  TRUE
)

# Case: [same as previous to check reproducibility]
# num_draws    5
# s_min        Set
# input_seed   Set
expect_error(
  TH0_repeat <- init_trunc_gauss_mix(2,
                                     5,
                                     600,
                                     1300,
                                     s_min=600,
                                     input_seed=30),
  NA
)

expect_equal(
  TH0,
  TH0_repeat
)

# Case: [same as previous, but with a different seed to check lack of
#        reproducibility]
# num_draws    5
# s_min        Set
# input_seed   Set
expect_error(
  TH0_repeat <- init_trunc_gauss_mix(2,
                                     5,
                                     600,
                                     1300,
                                     s_min=600,
                                     input_seed=40),
  NA
)

expect_equal(
  isTRUE(all.equal(TH0,TH0_repeat)),
  FALSE
)

# ------------------------------------------------------------------------------
# (3) Do unit tests for functions related to the Bayesian inference
#
# coverage: sample_theta
#           summarize_bayesian_inference
# ------------------------------------------------------------------------------


# (3a) sample_theta
# Define the density model
density_model <- list(type="trunc_gauss_mix",
                      tau_min=600,
                      tau_max=1300,
                      K=2)

# Define the hyperparameters
hp <-
  list(
    # Parameter for the dirichlet draw of the mixture probabilities
    alpha_d = 1,
    # The gamma distribution shape parameter for sigma
    alpha_s = 3,
    # The gamma distribution rate parameter for sigma, yielding a mode of 300
    alpha_r = (3 - 1) / 300,
    # Spacing for the measurement matrix (years)
    dtau = 5
  )
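
# A quick consistency check (not a baydem call): the mode of a gamma
# distribution with shape alpha_s and rate alpha_r is (alpha_s - 1) / alpha_r,
# which equals 300 here as intended by the comment above.
stopifnot(isTRUE(all.equal((hp$alpha_s - 1) / hp$alpha_r, 300)))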

input_control <- list(num_chains=2,
                      samps_per_chain=1800,
                      warmup=600,
                      stan_control=list(adapt_delta=.99),
                      mask=FALSE)
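# With these settings, the total number of post-warmup samples is presumably
# num_chains * (samps_per_chain - warmup) = 2 * (1800 - 600) = 2400, which is
# the value of total_samples used in section (5) below.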
expect_error(
  soln <- sample_theta(
    sim$data$rc_meas,
    density_model,
    hp,
    calib_df,
    control=input_control
  ),
  NA
)

expect_is(
  soln,
  "bd_bayesian_soln"
)

expect_equal(
  names(soln),
  c("fit",
    "final_th0",
    "final_init_seed",
    "final_stan_seed",
    "final_control",
    "optional_inputs")
)

expect_is(
  soln$fit,
  "stanfit"
)

expect_equal(
  soln$final_control,
  input_control
)

expect_equal(
  soln$optional_inputs,
  list(th0=NA,init_seed=NA,stan_seed=NA,control=input_control)
)

# Check that the simulation is reproducible when init_seed and stan_seed are
# used
expect_error(
  soln2 <- sample_theta(
    sim$data$rc_meas,
    density_model,
    hp,
    calib_df,
    init_seed=soln$final_init_seed,
    stan_seed=soln$final_stan_seed,
    control=input_control
  ),
  NA
)

expect_equal(
  soln$final_th0,
  soln2$final_th0
)

expect_equal(
  extract_param(soln$fit),
  extract_param(soln2$fit)
)

# Check that the inference succeeds if th0 is directly set
expect_error(
  soln <- sample_theta(
    sim$data$rc_meas,
    density_model,
    hp,
    calib_df,
    th0=th_sim,
    control=input_control
  ),
  NA
)

# Check that an error is thrown if th0 and init_seed are both provided
expect_error(
  sample_theta(
    sim$data$rc_meas,
    density_model,
    hp,
    calib_df,
    init_seed=1000,
    th0=th_sim,
    control=input_control
  ),
  "init_seed should not be provided if th0 is provided"
)

# Check that an error is thrown if an invalid parameter is passed in control
expect_error(
  sample_theta(
    sim$data$rc_meas,
    density_model,
    hp,
    calib_df,
    control=list(invalid=NA)
  ),
  "Unsupported named parameter in control = invalid"
)

# Check that sampling works if mask is TRUE
input_control <- list(num_chains=2,
                      samps_per_chain=1800,
                      warmup=600,
                      stan_control=list(adapt_delta=.99),
                      mask=TRUE)
expect_error(
  soln <- sample_theta(
    sim$data$rc_meas,
    density_model,
    hp,
    calib_df,
    calibration_curve="intcal20",
    control=input_control
  ),
  NA
)

expect_is(
  soln,
  "bd_bayesian_soln"
)

expect_equal(
  names(soln),
  c("fit",
    "final_th0",
    "final_init_seed",
    "final_stan_seed",
    "final_control",
    "optional_inputs")
)

expect_is(
  soln$fit,
  "stanfit"
)

expect_equal(
  soln$final_control,
  input_control
)

expect_equal(
  soln$optional_inputs,
  list(th0=NA,init_seed=NA,stan_seed=NA,control=input_control)
)

# (3b) summarize_bayesian_inference
expect_error(
  summary <- summarize_bayesian_inference(soln,
                                          rc_meas,
                                          density_model,
                                          calib_df),
  NA
)

expect_is(
  summary,
  "bd_bayesian_summary"
)

expect_equal(
  names(summary),
  c("tau","f_spdf","Qdens","Qrate","probs",
    "rate_prop","rate_ind","growth_state","summ_list")
)

expect_equal(
  dim(summary$Qdens),
  c(3,length(seq(density_model$tau_min,density_model$tau_max,by=5)))
)

# also check summarize_bayesian_inference when th_sim is used
expect_error(
  summary <- summarize_bayesian_inference(soln,
                                          rc_meas,
                                          density_model,
                                          calib_df,
                                          th_sim=th_sim),
  NA
)

expect_is(
  summary,
  "bd_bayesian_summary"
)

expect_equal(
  names(summary),
  c("tau","f_spdf","Qdens","Qrate","probs",
    "rate_prop","rate_ind","growth_state","summ_list",
    "f_sim","rate_sim")
)

expect_equal(
  dim(summary$Qdens),
  c(3,length(seq(density_model$tau_min,density_model$tau_max,by=5)))
)

# ------------------------------------------------------------------------------
# (4) Do unit tests for plotting functions
#
# coverage: make_blank_density_plot
#           plot_50_percent_quantile
#           add_shaded_quantiles
#           plot_known_sim_density
#           plot_summed_density
#           vis_calib_curve
# ------------------------------------------------------------------------------

# (4a) make_blank_density_plot
expect_error(
  make_blank_density_plot(summary),
  NA
)

# (4b) plot_50_percent_quantile
expect_error(
  plot_50_percent_quantile(summary, add = T),
  NA
)

# (4c) add_shaded_quantiles
expect_error(
  add_shaded_quantiles(summary),
  NA
)

# (4d) plot_known_sim_density
#      plot_summed_density
expect_error(
  plot_known_sim_density(summary, add=T),
  NA
)

expect_error(
  plot_summed_density(summary, add=T),
  NA
)

expect_error(
  plot_known_sim_density(summary),
  NA
)

expect_error(
  plot_summed_density(summary),
  NA
)

# (4e) vis_calib_curve
expect_error(
  vis_calib_curve(600, 700, calib_df),
  NA
)

# ------------------------------------------------------------------------------
# (5) Do unit tests for density calculations and related "helper functions"
#
# coverage: extract_param
#           calc_gauss_mix_pdf
#           calc_gauss_mix_pdf_mat
#           calc_quantiles
#           calc_half_life_from_peak
#           calc_relative_density
#             unpack_spec        [indirect]
#             calc_point_density [indirect]
#             calc_range_density [indirect]
#             calc_peak_density  [indirect]
#           summarize_trunc_gauss_mix_sample
# ------------------------------------------------------------------------------

# (5a) extract_param
total_samples <- 2400
expect_error(
  TH <- extract_param(soln$fit),
  NA
)

expect_equal(
  dim(TH),
  c(total_samples,6)
)

expect_error(
  extract_param(10),
  paste("Expected fit to be class stanfit, but it is", class(10))
)

# (5b) calc_gauss_mix_pdf
tau <- seq(density_model$tau_min,density_model$tau_max,by=hp$dtau)
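# tau is the calendar-date grid spanning the truncation interval at dtau-year
# spacing; it is reused for the density and measurement-matrix checks below.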

# density is the default type
expect_error(
  pdf_vect <- calc_gauss_mix_pdf(TH[50, ], tau),
  NA
)

expect_equal(
  length(tau),
  length(pdf_vect)
)

expect_equal(
  any(is.na(pdf_vect)),
  FALSE
)

expect_error(
  cdf_vect <- calc_gauss_mix_pdf(TH[50, ], tau, type = "cumulative"),
  NA
)

expect_equal(
  length(tau),
  length(cdf_vect)
)

expect_equal(
  any(is.na(cdf_vect)),
  FALSE
)

expect_error(
  deriv_vect <- calc_gauss_mix_pdf(TH[50, ], tau, type = "derivative"),
  NA
)

expect_equal(
  length(tau),
  length(deriv_vect)
)

expect_equal(
  any(is.na(deriv_vect)),
  FALSE
)

expect_error(
  rate_vect <- calc_gauss_mix_pdf(TH[50, ], tau, type = "rate"),
  NA
)

expect_equal(
  length(tau),
  length(rate_vect)
)

expect_equal(
  any(is.na(rate_vect)),
  FALSE
)

# (5c) calc_gauss_mix_pdf_mat

# density is the default type
expect_error(
  pdf_mat <- calc_gauss_mix_pdf_mat(TH, tau),
  NA
)

expect_equal(
  c(nrow(TH), length(tau)),
  dim(pdf_mat)
)

expect_equal(
  any(is.na(pdf_mat)),
  FALSE
)

expect_error(
  cdf_mat <- calc_gauss_mix_pdf_mat(TH, tau, type = "cumulative"),
  NA
)

expect_equal(
  c(nrow(TH), length(tau)),
  dim(cdf_mat)
)

expect_equal(
  any(is.na(cdf_mat)),
  FALSE
)

expect_error(
  deriv_mat <- calc_gauss_mix_pdf_mat(TH, tau, type = "derivative"),
  NA
)

expect_equal(
  c(nrow(TH), length(tau)),
  dim(deriv_mat)
)

expect_equal(
  any(is.na(deriv_mat)),
  FALSE
)

expect_error(
  rate_mat <- calc_gauss_mix_pdf_mat(TH, tau, type = "rate"),
  NA
)

expect_equal(
  c(nrow(TH), length(tau)),
  dim(rate_mat)
)

expect_equal(
  any(is.na(rate_mat)),
  FALSE
)

# (5d) calc_quantiles

expect_error(
  Qdens1 <- calc_quantiles(pdf_mat),
  NA
)

expect_equal(
  dim(Qdens1),
  c(3, ncol(pdf_mat))
)

expect_error(
  Qdens2 <- calc_quantiles(pdf_mat, c(.025, .05, .5, .90, .975)),
  NA
)

expect_equal(
  dim(Qdens2),
  c(5, ncol(pdf_mat))
)

# (5e) calc_half_life_from_peak

# Check when rc_meas and calib_df are provided
expect_error(
  half_life1 <- calc_half_life_from_peak(soln,
                                         density_model,
                                         rc_meas=rc_meas,
                                         calib_df=calib_df),
  NA
)

expect_equal(
  length(half_life1),
  total_samples
)

expect_equal(
  any(is.na(half_life1)),
  FALSE
)

expect_error(
  half_life2 <- calc_half_life_from_peak(soln,
                                         density_model,
                                         rc_meas=rc_meas,
                                         calib_df=calib_df,
                                         prop_chang=.25),
  NA
)

expect_equal(
  length(half_life2),
  total_samples
)

expect_equal(
  any(is.na(half_life2)),
  FALSE
)

expect_equal(
  all(half_life1 < half_life2),
  TRUE
)

# Check when bayesian_summary provided
expect_error(
  half_life1 <- calc_half_life_from_peak(soln,
                                         density_model,
                                         bayesian_summary=summary),
  NA
)

expect_equal(
  length(half_life1),
  total_samples
)

expect_equal(
  any(is.na(half_life1)),
  FALSE
)

expect_error(
  half_life2 <- calc_half_life_from_peak(soln,
                                         density_model,
                                         bayesian_summary=summary,
                                         prop_chang=.25),
  NA
)

expect_equal(
  length(half_life2),
  total_samples
)

expect_equal(
  any(is.na(half_life2)),
  FALSE
)

expect_equal(
  all(half_life1 < half_life2),
  TRUE
)

# Check errors related to the two ways the input(s) can be specified
expect_error(
  half_life1 <- calc_half_life_from_peak(soln,
                                         density_model),
  "rc_meas must be provided if bayesian_summary is not provided"
)

expect_error(
  half_life1 <- calc_half_life_from_peak(soln,
                                         density_model,
                                         rc_meas=rc_meas),
  "calib_df must be provided if bayesian_summary is not provided"
)

# (5f) calc_relative_density
#        unpack_spec        [indirect]
#        calc_point_density [indirect]
#        calc_range_density [indirect]
#        calc_peak_density  [indirect]
# Check two calls to calc_relative_density to check all four helper
# functions (which are only checked indirectly).

# Check calculation with peak when rc_meas and calib_df are provided
expect_error(
  rel_dens <- calc_relative_density(soln,
                                    density_model,
                                    "peak",
                                    1100,
                                    rc_meas=rc_meas,
                                    calib_df=calib_df),
  NA
)

expect_equal(
  length(rel_dens),
  total_samples
)

expect_equal(
  any(is.na(rel_dens)),
  FALSE
)

# Check calculation with peak when bayesian_summary is provided
expect_error(
  rel_dens <- calc_relative_density(soln,
                                    density_model,
                                    "peak",
                                    1100,
                                    bayesian_summary=summary),
  NA
)

expect_equal(
  length(rel_dens),
  total_samples
)

expect_equal(
  any(is.na(rel_dens)),
  FALSE
)

# Check errors related to the two ways the input(s) can be specified
# Check calculation with peak when bayesian_summary is provided
expect_error(
  calc_relative_density(soln,
                        density_model,
                        "peak",
                        1100),
  "rc_meas must be provided if bayesian_summary is not provided"
)

expect_error(
  calc_relative_density(soln,
                        density_model,
                        "peak",
                        1100,
                        rc_meas=rc_meas),
  "calib_df must be provided if bayesian_summary is not provided"
)

# Check calculation without peak when rc_meas and calib_df are provided
expect_error(
  rel_dens <- calc_relative_density(soln,
                                    density_model,
                                    900,
                                    c(700, 750),
                                    rc_meas=rc_meas,
                                    calib_df=calib_df),
  NA
)

expect_equal(
  length(rel_dens),
  total_samples
)

expect_equal(
  any(is.na(rel_dens)),
  FALSE
)

# (5g) summarize_trunc_gauss_mix_sample
expect_error(
  summ <- summarize_trunc_gauss_mix_sample(TH[50, ],
                                           density_model$tau_min,
                                           density_model$tau_max),
  NA
)

expect_equal(
  names(summ),
  c("periods","ind_peak","t_peak","f_peak","pattern")
)

# ------------------------------------------------------------------------------
# (6) Do unit tests for the measurement matrix calculation
#
# coverage: calc_meas_matrix
#           calc_trapez_weights
# ------------------------------------------------------------------------------


# Calculate the measurement matrix using tau (already defined above), which is
# regularly spaced. Do this both with and without calibration uncertainty, and
# also check the dimensions of the output.

# (6a) calc_meas_matrix
#        tau regularly spaced
#        with calibration uncertainty
expect_error(
  M6a <- calc_meas_matrix(tau,
                          rc_meas$phi_m,
                          rc_meas$sig_m,
                          calib_df,
                          add_calib_unc=T),
  NA
)

expect_equal(
  dim(M6a),
  c(length(rc_meas$phi_m), length(tau))
)

expect_equal(
  any(is.na((M6a))),
  FALSE
)

# (6b) calc_meas_matrix
#        tau regularly spaced
#        without calibration uncertainty
expect_error(
  M6b <- calc_meas_matrix(tau,
                          rc_meas$phi_m,
                          rc_meas$sig_m,
                          calib_df,
                          add_calib_unc=F),
  NA
)

expect_equal(
  dim(M6b),
  c(length(rc_meas$phi_m), length(tau))
)

expect_equal(
  any(is.na((M6b))),
  FALSE
)

# (6c) calc_meas_matrix
#        tau irregularly spaced
#        with calibration uncertainty
tau_irreg <- c(800, 805, 810, 825)

# an error is expected if use_trapez is not set to TRUE
expect_error(
  calc_meas_matrix(tau_irreg,
                   rc_meas$phi_m,
                   rc_meas$sig_m,
                   calib_df,
                   add_calib_unc=T),
  "tau is irregularly spaced but useTrapez is FALSE"
)

expect_error(
  M6c <- calc_meas_matrix(
    tau_irreg,
    rc_meas$phi_m,
    rc_meas$sig_m,
    calib_df,
    add_calib_unc=T,
    use_trapez=T),
  NA
)

expect_equal(
  dim(M6c),
  c(length(rc_meas$phi_m),length(tau_irreg))
)

expect_equal(
  any(is.na((M6c))),
  FALSE
)

# (6d) calc_meas_matrix
#        tau irregularly spaced
#        without calibration uncertainty

# an error is expected if use_trapez is not set to TRUE
expect_error(
  calc_meas_matrix(tau_irreg,
                   rc_meas$phi_m,
                   rc_meas$sig_m,
                   calib_df,
                   add_calib_unc=F),
  "tau is irregularly spaced but useTrapez is FALSE"
)

expect_error(
  M6d <- calc_meas_matrix(
    tau_irreg,
    rc_meas$phi_m,
    rc_meas$sig_m,
    calib_df,
    add_calib_unc=F,
    use_trapez=T),
  NA
)

expect_equal(
  dim(M6d),
  c(length(rc_meas$phi_m),length(tau_irreg))
)

expect_equal(
  any(is.na((M6d))),
  FALSE
)

# (6e) calc_trapez_weights
expect_equal(
  calc_trapez_weights(c(-1.5, 2, 3, 4, 7)),
  c(1.75, 2.25, 1, 2, 1.5)
)

# ------------------------------------------------------------------------------
# (7) Do unit tests for identifiability functions
#
# coverage: assess_calib_curve_equif
#             phi2tau [indirect]
#           calc_calib_curve_equif_dates
#           calc_calib_curve_frac_modern [now located in radiocarbon_functions.R]
# ------------------------------------------------------------------------------

# (7a) assess_calib_curve_equif
#        phi2tau [indirect]
#      calc_calib_curve_equif_dates
expect_error(
  equif_result1 <- assess_calib_curve_equif(calib_df),
  NA
)

expect_equal(
  names(equif_result1),
  c("inv_span_list","can_invert")
)

expect_equal(
  length(equif_result1$can_invert),
  nrow(calib_df)
)

expect_error(
  equif_dates <- calc_calib_curve_equif_dates(calib_df),
  NA
)

expect_error(
  equif_result2 <- assess_calib_curve_equif(calib_df, equif_dates),
  NA
)

expect_equal(
  names(equif_result2),
  c("inv_span_list","can_invert")
)

expect_equal(
  length(equif_result2$can_invert),
  nrow(calib_df)
)

expect_equal(
  equif_result1,
  equif_result2
)

# (7b) calc_calib_curve_frac_modern

expect_error(
  phi <- calc_calib_curve_frac_modern(calib_df),
  NA
)

expect_equal(
  phi,
  exp(-calib_df$uncal_year_BP / 8033)
)

tau2 <- c(600, 602, 805.89)
expect_error(
  phi2 <- calc_calib_curve_frac_modern(calib_df, tau2),
  NA
)

expect_equal(
  length(phi2),
  length(tau2)
)

# ------------------------------------------------------------------------------
# (8) Do unit tests for truncated exponential model
#
# coverage: sample_trunc_exp
# ------------------------------------------------------------------------------
expect_error(
  exp_samp1 <- sample_trunc_exp(50, 0.01, 600, 1300),
  NA
)

expect_equal(
  length(exp_samp1),
  50
)

expect_error(
  exp_samp2 <- sample_trunc_exp(50, -0.01, 600, 1300),
  NA
)

expect_equal(
  length(exp_samp2),
  50
)

# ------------------------------------------------------------------------------
# (9) Do unit tests for data io (input/output) functions
#
# coverage: import_rc_data         (9a)
#           set_rc_meas            (9b)
#           set_sim                (9c)
#           calc_tau_range         (9d)
#           set_density_model      (9e)
#           set_calib_curve        (9f)
#           do_bayesian_inference  (9g)
#           do_bayesian_summary    (9h)
#           plot_best_solution     (9i)
# ------------------------------------------------------------------------------

# (9a) import_rc_data

# Specifying phi_m and sig_m with named columns
file_name <- system.file("extdata",
                         "fraction_modern_named_columns.csv",
                         package = "baydem")

expect_error(
  rc_meas <- import_rc_data(file_name,
                            phi_m_col="fraction_modern",
                            sig_m_col="fraction_modern_error"),
  NA
)

phi_m <- c(.80,.75,.74,.82)
sig_m <- c(.001,.002,.002,.001)
rc_meas_direct <- list(phi_m=phi_m,
                       sig_m=sig_m,
                       trc_m=-8033 * log(phi_m),
                       sig_trc_m=8033 * sig_m / phi_m)
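# The conversions above use the Libby mean life of 8033 years:
# trc = -8033 * log(phi), and first-order error propagation gives
# sig_trc = 8033 * sig / phi.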

expect_equal(
  rc_meas,
  rc_meas_direct
)

# Specifying an invalid set of inputs
expect_error(
  rc_meas <- import_rc_data(file_name,
                            trc_m_col="this_should_fail",
                            phi_m_col="fraction_modern",
                            sig_m_col="fraction_modern_error"),
  "Unsupported input pattern for specifying data columns"
)

# Specifying phi_m and sig_m with column numbers
expect_error(
  rc_meas <- import_rc_data(file_name,
                            phi_m_col=1,
                            sig_m_col=2),
  NA
)

expect_equal(
  rc_meas,
  rc_meas_direct
)

# Specifying trc_m and sig_trc_m with named columns
file_name <- system.file("extdata",
                         "rc_years_BP_named_columns.csv",
                         package = "baydem")

expect_error(
  rc_meas <- import_rc_data(file_name,
                            trc_m_col="rc_years_BP",
                            sig_trc_m_col="rc_years_BP_error"),
  NA
)

trc_m     <- c(900,800,850)
sig_trc_m <- c( 20,400, 70)
phi_m <- exp(-trc_m/8033)
sig_m <- phi_m * sig_trc_m / 8033
rc_meas_direct <- list(phi_m=phi_m,
                       sig_m=sig_m,
                       trc_m=trc_m,
                       sig_trc_m=sig_trc_m)

expect_equal(
  rc_meas,
  rc_meas_direct
)

# Specifying trc_m and sig_trc_m with column numbers, skipping the column-name
# row, and specifying header=F (to test the functioning of ...)
expect_error(
  rc_meas <- import_rc_data(file_name,
                            trc_m_col=1,
                            sig_trc_m_col=2,
                            skip=1,
                            header=F),
  NA
)

expect_equal(
  rc_meas,
  rc_meas_direct
)

# Specifying no columns, for which it is assumed that the first and second
# columns are, respectively, trc_m and sig_trc_m.

expect_error(
  rc_meas <- import_rc_data(file_name),
  NA
)

expect_equal(
  rc_meas,
  rc_meas_direct
)

# (9b) set_rc_meas

# Call data_dir to get the temporary directory to use in testing
data_dir <- tempdir()

rc_meas <- sim$data$rc_meas

analysis_name <- "test_analysis"
path_to_analysis_file <- file.path(data_dir,paste0(analysis_name,".rds"))

# If the analysis file exists, delete it
if (file.exists(path_to_analysis_file)) {
  success <- file.remove(path_to_analysis_file)
}

expect_error(
  set_rc_meas(data_dir,analysis_name,rc_meas),
  NA
)

# Make sure the file was created and the stored variable is correct
expect_equal(
  file.exists(path_to_analysis_file),
  T
)

expect_equal(
  readRDS(path_to_analysis_file),
  list(rc_meas=rc_meas)
)

# Make sure that we cannot set rc_meas for an analysis that already exists
expect_error(
  set_rc_meas(data_dir,analysis_name,rc_meas),
  "A save file for analysis_name already exists in data_dir"
)

# (9c) set_sim
sim_analysis_name <- "sim"
path_to_sim_analysis_file <-
  file.path(data_dir,paste0(sim_analysis_name,".rds"))

# If the analysis file exists, delete it
if (file.exists(path_to_sim_analysis_file)) {
  success <- file.remove(path_to_sim_analysis_file)
}

expect_error(
  set_sim(data_dir,sim_analysis_name,sim),
  NA
)

# Make sure the file was created and the stored variable is correct
expect_equal(
  file.exists(path_to_sim_analysis_file),
  T
)

expect_equal(
  readRDS(path_to_sim_analysis_file),
  list(rc_meas=sim$data$rc_meas,calib_df=calib_df,sim=sim)
)

# Make sure that we cannot set a simulation for an analysis that already exists
expect_error(
  set_sim(data_dir,sim_analysis_name,sim),
  "A save file for analysis_name already exists in data_dir"
)

# (9d) calc_tau_range

# The tau range should be the same across test runs since all relative random
# number seeds are set above
expect_error(
  tau_range <- calc_tau_range(rc_meas),
  NA
)

expect_equal(
  tau_range,
  list(tau_min=603,tau_max=1265)
)

# Using dtau=1 should have no effect
expect_error(
  tau_range <- calc_tau_range(rc_meas,dtau=1),
  NA
)

expect_equal(
  tau_range,
  list(tau_min=603,tau_max=1265)
)

# A warning should be thrown if dtau is not an integer
expect_warning(
  tau_range <- calc_tau_range(rc_meas,dtau=1.5),
  "dtau is being ignored because it is not an integer"
)

expect_equal(
  tau_range,
  list(tau_min=603,tau_max=1265)
)

# Check that the range is extended if dtau=5 and dtau=7
expect_error(
  tau_range <- calc_tau_range(rc_meas,dtau=5),
  NA
)

expect_equal(
  tau_range,
  list(tau_min=600,tau_max=1265)
)

expect_error(
  tau_range <- calc_tau_range(rc_meas,dtau=7),
  NA
)

expect_equal(
  tau_range,
  list(tau_min=602,tau_max=1267)
)

# (9e) set_density_model

# First check the error messages. The order in which they are checked here is
# not the same as the order in which they would be encountered when reading
# through the source code.

empty_density_model <- list()
bad_analysis_name <- "not_yet_defined"
expect_error(
  set_density_model(data_dir,bad_analysis_name,density_model),
  "A save file for analysis_name does not exist in data_dir"
)

expect_error(
  set_density_model(data_dir,analysis_name,empty_density_model),
  "type must be a named field in the list density_model"
)

invalid_density_model <- list(type="invalid_type")
expect_error(
  set_density_model(data_dir,analysis_name,invalid_density_model),
  "Unsupported type for density_model"
)

# no tau_min
invalid_density_model <- list(type="trunc_gauss_mix",
                              tau_max=1300,
                              K=4)
expect_error(
  set_density_model(data_dir,analysis_name,invalid_density_model),
  "tau_min must be a field in density_model for trunc_gauss_mix"
)

# no tau_max
invalid_density_model <- list(type="trunc_gauss_mix",
                              tau_min=600,
                              K=4)
expect_error(
  set_density_model(data_dir,analysis_name,invalid_density_model),
  "tau_max must be a field in density_model for trunc_gauss_mix"
)

# no K
invalid_density_model <- list(type="trunc_gauss_mix",
                              tau_min=600,
                              tau_max=1300)
expect_error(
  set_density_model(data_dir,analysis_name,invalid_density_model),
  "K must be a field in density_model for trunc_gauss_mix"
)

# tau_max less than tau_min
invalid_density_model <- list(type="trunc_gauss_mix",
                              tau_min=1300,
                              tau_max=600,
                              K=4)
expect_error(
  set_density_model(data_dir,analysis_name,invalid_density_model),
  "tau_min must be less than tau_max"
)

# Make a correct call to update the saved .rds file, check the result, and make
# sure that an error is thrown if an attempt is made to reset the density_model.
density_model <- list(type="trunc_gauss_mix",
                      tau_min=600,
                      tau_max=1300,
                      K=2:3)
expect_error(
  set_density_model(data_dir,analysis_name,density_model),
  NA
)

expect_equal(
  readRDS(path_to_analysis_file),
  list(rc_meas=rc_meas,density_model=density_model)
)

expect_error(
  set_density_model(data_dir,analysis_name,density_model),
  "A density model has already been defined for this analysis"
)

# (9f) set_calib_curve
expect_error(
  set_calib_curve(data_dir,bad_analysis_name),
  "A save file for analysis_name does not exist in data_dir"
)

expect_error(
  set_calib_curve(data_dir,analysis_name,"intcal20"),
  NA
)

analysis <- readRDS(path_to_analysis_file)
expect_equal(
  names(analysis),
  c("rc_meas","density_model","calib_df")
)

calib_df <- load_calib_curve("intcal20")
expect_equal(
  analysis$calib_df,
  calib_df
)

# Directly delete calib_df from the .rds save file and check that it can also
# be set with a dataframe as input

analysis["calib_df"] <- NULL
saveRDS(analysis,path_to_analysis_file)
expect_error(
  set_calib_curve(data_dir,analysis_name,calib_df),
  NA
)

analysis <- readRDS(path_to_analysis_file)
expect_equal(
  names(analysis),
  c("rc_meas","density_model","calib_df")
)

calib_df <- load_calib_curve("intcal20")
expect_equal(
  analysis$calib_df,
  calib_df
)

# (9g) do_bayesian_inference
expect_error(
  do_bayesian_inference(data_dir,bad_analysis_name,hp),
  "A save file for analysis_name does not exist in data_dir"
)

bad_analysis_name <- "bad"
path_to_bad_analysis_file <- file.path(data_dir,
                                       paste0(bad_analysis_name,".rds"))
saveRDS(list(),path_to_bad_analysis_file)
expect_error(
  do_bayesian_inference(data_dir,bad_analysis_name,hp),
  "Radiocarbon measurements have not specified for this analysis"
)

saveRDS(list(rc_meas=rc_meas),path_to_bad_analysis_file)
expect_error(
  do_bayesian_inference(data_dir,bad_analysis_name,hp),
  "A density model has not been specified for this analysis"
)

saveRDS(list(rc_meas=rc_meas,density_model=density_model),
        path_to_bad_analysis_file)
expect_error(
  do_bayesian_inference(data_dir,bad_analysis_name,hp),
  "A calibration curve has not been specified for this analysis"
)

invalid_density_model <- list(type="invalid_type")
saveRDS(list(rc_meas=rc_meas,
             density_model=invalid_density_model,
             calib_df=calib_df),
        path_to_bad_analysis_file)
expect_error(
  do_bayesian_inference(data_dir,bad_analysis_name,hp),
  "Unsupported type for density_model"
)

saveRDS(list(rc_meas=rc_meas,
             density_model=density_model,
             calib_df=calib_df),
        path_to_analysis_file)
expect_error(
  do_bayesian_inference(data_dir,
                         analysis_name,
                         hp,
                         control=input_control),
  NA
)

analysis <- readRDS(path_to_analysis_file)

num_models <- length(density_model$K)
expect_equal(
  length(analysis$bayesian_solutions),
  num_models
)

expect_equal(
  analysis$input_seed,
  NA
)

expect_equal(
  is.na(analysis$base_seed),
  FALSE
)

expect_equal(
  length(analysis$base_seed),
  1
)

expect_equal(
  any(is.na(analysis$seed_mat)),
  FALSE
)

expect_equal(
  dim(analysis$seed_mat),
  c(num_models,2)
)

expect_equal(
  any(is.na(analysis$loo_vect)),
  FALSE
)

expect_equal(
  length(analysis$loo_vect),
  num_models
)

expect_equal(
  any(is.na(analysis$waic_vect)),
  FALSE
)

expect_equal(
  length(analysis$waic_vect),
  num_models
)

expect_equal(
  is.numeric(analysis$m_K_best),
  TRUE
)

expect_equal(
  is.numeric(analysis$K_best),
  TRUE
)

expect_error(
  do_bayesian_inference(data_dir,
                         analysis_name,
                         hp,
                         control=input_control),
  "Bayesian inference has already been done for this analysis"
)

# Check reproducibility
analysis0 <- readRDS(path_to_analysis_file)
base_seed <- analysis0$base_seed
analysis <- analysis0
# Undo the Bayesian inference
analysis$bayesian_solutions <- NULL
saveRDS(analysis,path_to_analysis_file)

expect_error(
  do_bayesian_inference(data_dir,
                         analysis_name,
                         hp,
                         input_seed=base_seed,
                         control=input_control),
  NA
)

analysis <- readRDS(path_to_analysis_file)
expect_equal(
  analysis$base_seed,
  base_seed
)

for (m_K in 1:length(analysis$bayesian_solutions)) {
  expect_equal(
    extract_param(analysis$bayesian_solutions[[m_K]]$fit),
    extract_param(analysis0$bayesian_solutions[[m_K]]$fit)
  )
}

bayesian_solutions <- analysis$bayesian_solutions
loo_vect  <- rep(NA,length(bayesian_solutions))
waic_vect <- rep(NA,length(bayesian_solutions))
for (m_K in 1:length(bayesian_solutions)) {
  log_lik_mat <- rstan::extract(bayesian_solutions[[m_K]]$fit,"logh")[[1]]
  waic_analysis <- loo::waic(log_lik_mat)
  waic_vect[m_K] <- waic_analysis$estimates["waic","Estimate"]
  loo_analysis <- loo::loo(log_lik_mat)
  loo_vect[m_K] <- loo_analysis["estimates"][[1]]["elpd_loo",
                                                   "Estimate"]
}
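# The best model is the one that maximizes elpd_loo (the estimated expected
# log pointwise predictive density from leave-one-out cross-validation).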
m_K_best <- which.max(loo_vect)

expect_equal(
  analysis$loo_vect,
  loo_vect
)

expect_equal(
  analysis$waic_vect,
  waic_vect
)

expect_equal(
  analysis$K_best,
  c(2,3)[m_K_best]
)

expect_equal(
  analysis$m_K_best,
  m_K_best
)


# (9h) do_bayesian_summary
#      [Currently, do_bayesian_summary is not checked when sim is in analysis.
#       However, the functioning of summarize_bayesian_inference with th_sim
#       is checked, and the vignette standard_pipeline.Rmd validates the
#       behavior of do_bayesian_summary when a simulation is used in the
#       standard pipeline.]
success <- file.remove(path_to_bad_analysis_file)
expect_error(
  do_bayesian_summary(data_dir,bad_analysis_name),
  "A save file for analysis_name does not exist in data_dir"
)

bad_analysis_name <- "bad"
path_to_bad_analysis_file <- file.path(data_dir,
                                       paste0(bad_analysis_name,".rds"))
saveRDS(list(),path_to_bad_analysis_file)
expect_error(
  do_bayesian_summary(data_dir,bad_analysis_name),
  "Radiocarbon measurements have not specified for this analysis"
)

saveRDS(list(rc_meas=rc_meas),path_to_bad_analysis_file)
expect_error(
  do_bayesian_summary(data_dir,bad_analysis_name),
  "A density model has not been specified for this analysis"
)

saveRDS(list(rc_meas=rc_meas,density_model=density_model),
        path_to_bad_analysis_file)
expect_error(
  do_bayesian_summary(data_dir,bad_analysis_name),
  "A calibration curve has not been specified for this analysis"
)

saveRDS(list(rc_meas=rc_meas,density_model=density_model,calib_df=calib_df),
        path_to_bad_analysis_file)
expect_error(
  do_bayesian_summary(data_dir,bad_analysis_name),
  "Bayesian inference has not been done for this analysis"
)

saveRDS(list(rc_meas=rc_meas,
             density_model=density_model,
             calib_df=calib_df,
             bayesian_solutions=analysis0$bayesian_solutions,
             hp=analysis0$hp,
             K_best=analysis0$K_best,
             m_K_best=analysis0$m_K_best,
             loo_vect=analysis0$loo_vect,
             waic_vect=analysis0$waic_vect),
        path_to_analysis_file)
expect_error(
  do_bayesian_summary(data_dir,
                      analysis_name),
  NA
)

# Check that analysis has the right fields
analysis <- readRDS(path_to_analysis_file)
expect_equal(
  "bayesian_summary" %in% names(analysis),
  TRUE
)

# Extract bayesian_solutions and bayesian_summary for use below
bayesian_solutions <- analysis$bayesian_solutions
bayesian_summary   <- analysis$bayesian_summary
expect_error(
  do_bayesian_summary(data_dir,
                         analysis_name),
  "A summary has already been done for this analysis"
)

# (9i) plot_best_solution
success <- file.remove(path_to_bad_analysis_file)
expect_error(
  plot_best_solution(data_dir,bad_analysis_name),
  "A save file for analysis_name does not exist in data_dir"
)

bad_analysis_name <- "bad"
path_to_bad_analysis_file <- file.path(data_dir,
                                       paste0(bad_analysis_name,".rds"))
saveRDS(list(),path_to_bad_analysis_file)
expect_error(
  plot_best_solution(data_dir,bad_analysis_name),
  "Radiocarbon measurements have not specified for this analysis"
)

saveRDS(list(rc_meas=rc_meas),path_to_bad_analysis_file)
expect_error(
  plot_best_solution(data_dir,bad_analysis_name),
  "A density model has not been specified for this analysis"
)

saveRDS(list(rc_meas=rc_meas,density_model=density_model),
        path_to_bad_analysis_file)
expect_error(
  plot_best_solution(data_dir,bad_analysis_name),
  "A calibration curve has not been specified for this analysis"
)

saveRDS(list(rc_meas=rc_meas,density_model=density_model,calib_df=calib_df),
        path_to_bad_analysis_file)
expect_error(
  plot_best_solution(data_dir,bad_analysis_name),
  "Bayesian inference has not been done for this analysis"
)

saveRDS(list(rc_meas=rc_meas,
             density_model=density_model,
             calib_df=calib_df,
             bayesian_solutions=bayesian_solutions),
        path_to_bad_analysis_file)
expect_error(
  plot_best_solution(data_dir,bad_analysis_name),
  "Summary has not been done for this analysis"
)

saveRDS(list(rc_meas=rc_meas,
             density_model=density_model,
             calib_df=calib_df,
             bayesian_solutions=bayesian_solutions,
             bayesian_summary=bayesian_summary),
        path_to_bad_analysis_file)
expect_error(
  plot_best_solution(data_dir,
                     analysis_name,
                     "temp.jpg"),
  "plot_file_name must be a .pdf .png file"
)

expect_error(
  plot_best_solution(data_dir,bad_analysis_name),
  NA
)

plot_file1 <- file.path(data_dir,"temp.pdf")
if (file.exists(plot_file1)) {
  success <- file.remove(plot_file1)
}
expect_error(
  plot_best_solution(data_dir,bad_analysis_name,plot_file_name=plot_file1),
  NA
)

expect_true(
  file.exists(plot_file1)
)

plot_file2 <- file.path(data_dir,"temp.png")
if (file.exists(plot_file2)) {
  success <- file.remove(plot_file2)
}
expect_error(
  plot_best_solution(data_dir,bad_analysis_name,plot_file_name=plot_file2),
  NA
)

expect_true(
  file.exists(plot_file2)
)

success <- file.remove(path_to_bad_analysis_file)
success <- file.remove(path_to_analysis_file)
success <- file.remove(plot_file1)
success <- file.remove(plot_file2)