tests/testthat/test_validate_fn.R

library(cvms)
context("validate_fn()")


test_that("quick: binomial glm model works with validate_fn()", {

  # Load data and fold it
  xpectr::set_test_seed(1)
  dat <- participant.scores %>%
    dplyr::mutate(diagnosis = as.factor(diagnosis))
  dat <- groupdata2::partition(
    dat,
    p = 0.75,
    cat_col = "diagnosis",
    id_col = "participant",
    list_out = FALSE
  )

  glm_model_fn <- function(train_data, formula, hyperparameters) {
    warning(paste0("a = ", hyperparameters[["a"]], "; b = ", hyperparameters[["b"]]))
    glm(formula = formula, data = train_data, family = "binomial")
  }
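
  # The warning above encodes the current hyperparameter values, so the
  # `Warnings and Messages` column can later be used to verify that each
  # (a, b) combination actually reaches the model function.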

  glm_predict_fn <- predict_functions("glm_binomial")
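
  # For reference, `predict_functions("glm_binomial")` returns a ready-made
  # predict function with the signature
  # `(test_data, model, formula, hyperparameters, train_data)`
  # (the fuzz tests further down assert these argument names).
  # Roughly equivalent sketch, not the exact cvms internals:
  # function(test_data, model, formula, hyperparameters, train_data) {
  #   stats::predict(model, newdata = test_data, type = "response")
  # }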

  hparams <- list(
    "a" = c(1),
    "b" = c(3)
  )
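
  # Passing a named list to `metrics` enables the information criteria
  # (AIC, AICc, BIC) on top of the default binomial metrics.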

  suppressWarnings(Vbinomlist <- validate_fn(
    train_data = dat,
    model_fn = glm_model_fn,
    predict_fn = glm_predict_fn,
    preprocess_once = TRUE,
    formulas = c("diagnosis~score"),
    hyperparameters = hparams,
    partitions_col = ".partitions",
    type = "binomial",
    metrics = list(
      "AIC" = TRUE,
      "AICc" = TRUE,
      "BIC" = TRUE
    ),
    positive = 2
  ))


  ## Testing 'Vbinomlist'                                                   ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Testing class
  expect_equal(
    class(Vbinomlist),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    Vbinomlist[["Fixed"]],
    "score",
    fixed = TRUE)
  expect_equal(
    Vbinomlist[["Balanced Accuracy"]],
    0.83333,
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist[["F1"]],
    0.92308,
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist[["Sensitivity"]],
    1,
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist[["Specificity"]],
    0.66667,
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist[["Pos Pred Value"]],
    0.85714,
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist[["Neg Pred Value"]],
    1,
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist[["AUC"]],
    0.94444,
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist[["Lower CI"]],
    0.79046,
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist[["Upper CI"]],
    1,
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist[["Kappa"]],
    0.72727,
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist[["MCC"]],
    0.75593,
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist[["Detection Rate"]],
    0.66667,
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist[["Detection Prevalence"]],
    0.77778,
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist[["Prevalence"]],
    0.66667,
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist[["AIC"]],
    28.45268,
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist[["AICc"]],
    29.11935,
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist[["BIC"]],
    30.54172,
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist[["Convergence Warnings"]],
    0,
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist[["Other Warnings"]],
    1,
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist[["Dependent"]],
    "diagnosis",
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(Vbinomlist),
    c("Fixed", "Balanced Accuracy", "F1", "Sensitivity", "Specificity",
      "Pos Pred Value", "Neg Pred Value", "AUC", "Lower CI", "Upper CI",
      "Kappa", "MCC", "Detection Rate", "Detection Prevalence", "Prevalence",
      "AIC", "AICc", "BIC", "Predictions", "ROC", "Confusion Matrix",
      "Coefficients", "Convergence Warnings", "Other Warnings", "Warnings and Messages",
      "Process", "HParams", "Model", "Dependent"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(Vbinomlist),
    c("character", "numeric", "numeric", "numeric", "numeric", "numeric",
      "numeric", "numeric", "numeric", "numeric", "numeric", "numeric",
      "numeric", "numeric", "numeric", "numeric", "numeric", "numeric",
      "list", "list", "list", "list", "integer", "integer", "list",
      "list", "list", "list", "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(Vbinomlist),
    c("character", "double", "double", "double", "double", "double",
      "double", "double", "double", "double", "double", "double",
      "double", "double", "double", "double", "double", "double",
      "list", "list", "list", "list", "integer", "integer", "list",
      "list", "list", "list", "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(Vbinomlist),
    c(1L, 29L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(Vbinomlist)),
    character(0),
    fixed = TRUE)
  ## Finished testing 'Vbinomlist'                                          ####

})

test_that("binomial glm model works with validate_fn()", {

  testthat::skip_on_cran()

  # Load data and fold it
  xpectr::set_test_seed(1)
  dat_ready <- participant.scores %>%
    dplyr::mutate(diagnosis = as.factor(diagnosis))
  dat_list <- groupdata2::partition(dat_ready,
    p = 0.75,
    cat_col = "diagnosis",
    id_col = "participant"
  )

  xpectr::set_test_seed(1)
  dat_not_list <- groupdata2::partition(dat_ready,
    p = 0.75,
    cat_col = "diagnosis",
    id_col = "participant",
    list_out = FALSE
  )

  glm_model_fn <- function(train_data, formula, hyperparameters) {
    warning(paste0("a = ", hyperparameters[["a"]], "; b = ", hyperparameters[["b"]]))
    glm(formula = formula, data = train_data, family = "binomial")
  }

  glm_predict_fn <- predict_functions("glm_binomial")

  # The example preprocess fn uses the `formula` argument, but we hardcode
  # the formula for this test
  glm_preprocess_fn <- function(train_data, test_data, formula, hyperparameters) {

    # Create recipes object
    recipe_object <- recipes::recipe(

      # Note: Since the formula is hardcoded here (instead of using the
      # `formula` argument), the train/test splits can be preprocessed
      # once instead of once per formula
      # Tip: Use `y ~ .` to include all predictors (where `y` is your dependent variable)
      formula = diagnosis ~ .,
      data = train_data
    ) %>%

      # Add preprocessing steps
      # Note: We could specify individual variables for each step
      # instead of selecting all numeric variables
      recipes::step_center(recipes::all_numeric()) %>%
      recipes::step_scale(recipes::all_numeric()) %>%

      # Find parameters from the training set
      recipes::prep(training = train_data)

    # Apply preprocessing to the partitions
    train_data <- recipes::bake(recipe_object, train_data)
    test_data <- recipes::bake(recipe_object, test_data)

    # Extract the preprocessing parameters
    means <- recipe_object$steps[[1]]$means
    sds <- recipe_object$steps[[2]]$sds

    # Add preprocessing parameters to a tibble
    tidy_parameters <- tibble::tibble("Measure" = c("Mean", "SD")) %>%
      dplyr::bind_cols(dplyr::bind_rows(means, sds))

    list(
      "train" = train_data,
      "test" = test_data,
      "parameters" = tidy_parameters
    )
  }
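
  # Direct usage of the preprocess function (a sketch, kept commented out
  # like the other helper snippets in this file; the `formula` and
  # `hyperparameters` arguments are ignored by this hardcoded version):
  # parts <- glm_preprocess_fn(
  #   train_data = dat_list[[1]], test_data = dat_list[[2]],
  #   formula = NULL, hyperparameters = NULL
  # )
  # parts$parameters  # one "Mean" and one "SD" row per numeric column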

  hparams <- list(
    "a" = c(1, 2),
    "b" = c(3, 4)
  )
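
  # `validate_fn()` crosses these values into a 2 x 2 grid, so each of the
  # two formulas below is validated with four (a, b) combinations,
  # giving 8 result rows in total. Equivalent grid (sketch):
  # expand.grid("a" = c(1, 2), "b" = c(3, 4))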

  suppressWarnings(Vbinomlist_list <- validate_fn(
    train_data = dat_list[[1]],
    test_data = dat_list[[2]],
    model_fn = glm_model_fn,
    predict_fn = glm_predict_fn,
    preprocess_fn = glm_preprocess_fn,
    preprocess_once = FALSE,
    formulas = c("diagnosis~score", "diagnosis~age"),
    hyperparameters = hparams,
    type = "binomial",
    metrics = list(
      "AIC" = TRUE,
      "AICc" = TRUE,
      "BIC" = TRUE
    ),
    positive = 2
  ))

  suppressWarnings(Vbinomlist_not_list <- validate_fn(
    train_data = dat_not_list,
    model_fn = glm_model_fn,
    predict_fn = glm_predict_fn,
    preprocess_fn = glm_preprocess_fn,
    preprocess_once = TRUE,
    formulas = c("diagnosis~score", "diagnosis~age"),
    hyperparameters = hparams,
    partitions_col = ".partitions",
    type = "binomial",
    metrics = list(
      "AIC" = TRUE,
      "AICc" = TRUE,
      "BIC" = TRUE
    ),
    positive = 2
  ))

  # For debugging when the all.equal.list() check below fails:
  # plyr::llply(colnames(Vbinomlist_list), function(cl){
  #   expect_equal(Vbinomlist_list[[cl]], Vbinomlist_not_list[[cl]])
  # })

  expect_true(all.equal.list(
    Vbinomlist_list,
    Vbinomlist_not_list
  ))
  warns <- dplyr::bind_rows(Vbinomlist_list$`Warnings and Messages`)
  expect_equal(
    warns,
    structure(list(
      Function = c(
        "model_fn", "model_fn", "model_fn",
        "model_fn", "model_fn", "model_fn", "model_fn", "model_fn"
      ),
      Type = c(
        "warning", "warning", "warning", "warning", "warning",
        "warning", "warning", "warning"
      ),
      Message = c(
        "a = 1; b = 3",
        "a = 2; b = 3",
        "a = 1; b = 4",
        "a = 2; b = 4",
        "a = 1; b = 3",
        "a = 2; b = 3",
        "a = 1; b = 4",
        "a = 2; b = 4"
      )
    ),
    row.names = c(NA, -8L), class = c("tbl_df", "tbl", "data.frame")
    )
  )

  ## Testing 'Vbinomlist_list'                                              ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Testing class
  expect_equal(
    class(Vbinomlist_list),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    Vbinomlist_list[["Fixed"]],
    c("score", "score", "score", "score", "age", "age", "age", "age"),
    fixed = TRUE)
  expect_equal(
    Vbinomlist_list[["Balanced Accuracy"]],
    c(0.83333, 0.83333, 0.83333, 0.83333, 0.5, 0.5, 0.5, 0.5),
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist_list[["F1"]],
    c(0.92308, 0.92308, 0.92308, 0.92308, 0.8, 0.8, 0.8, 0.8),
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist_list[["Sensitivity"]],
    c(1, 1, 1, 1, 1, 1, 1, 1),
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist_list[["Specificity"]],
    c(0.66667, 0.66667, 0.66667, 0.66667, 0, 0, 0, 0),
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist_list[["Pos Pred Value"]],
    c(0.85714, 0.85714, 0.85714, 0.85714, 0.66667, 0.66667, 0.66667,
      0.66667),
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist_list[["Neg Pred Value"]],
    c(1, 1, 1, 1, NaN, NaN, NaN, NaN),
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist_list[["AUC"]],
    c(0.94444, 0.94444, 0.94444, 0.94444, 0, 0, 0, 0),
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist_list[["Lower CI"]],
    c(0.79046, 0.79046, 0.79046, 0.79046, 0, 0, 0, 0),
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist_list[["Upper CI"]],
    c(1, 1, 1, 1, 0, 0, 0, 0),
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist_list[["Kappa"]],
    c(0.72727, 0.72727, 0.72727, 0.72727, 0, 0, 0, 0),
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist_list[["MCC"]],
    c(0.75593, 0.75593, 0.75593, 0.75593, 0, 0, 0, 0),
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist_list[["Detection Rate"]],
    c(0.66667, 0.66667, 0.66667, 0.66667, 0.66667, 0.66667, 0.66667,
      0.66667),
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist_list[["Detection Prevalence"]],
    c(0.77778, 0.77778, 0.77778, 0.77778, 1, 1, 1, 1),
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist_list[["Prevalence"]],
    c(0.66667, 0.66667, 0.66667, 0.66667, 0.66667, 0.66667, 0.66667,
      0.66667),
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist_list[["AIC"]],
    c(28.45268, 28.45268, 28.45268, 28.45268, 32.64402, 32.64402, 32.64402,
      32.64402),
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist_list[["AICc"]],
    c(29.11935, 29.11935, 29.11935, 29.11935, 33.31069, 33.31069, 33.31069,
      33.31069),
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist_list[["BIC"]],
    c(30.54172, 30.54172, 30.54172, 30.54172, 34.73307, 34.73307, 34.73307,
      34.73307),
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist_list[["Convergence Warnings"]],
    c(0, 0, 0, 0, 0, 0, 0, 0),
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist_list[["Other Warnings"]],
    c(1, 1, 1, 1, 1, 1, 1, 1),
    tolerance = 1e-4)
  expect_equal(
    Vbinomlist_list[["Dependent"]],
    c("diagnosis", "diagnosis", "diagnosis", "diagnosis", "diagnosis",
      "diagnosis", "diagnosis", "diagnosis"),
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(Vbinomlist_list),
    c("Fixed", "Balanced Accuracy", "F1", "Sensitivity", "Specificity",
      "Pos Pred Value", "Neg Pred Value", "AUC", "Lower CI", "Upper CI",
      "Kappa", "MCC", "Detection Rate", "Detection Prevalence", "Prevalence",
      "AIC", "AICc", "BIC", "Predictions", "ROC", "Confusion Matrix",
      "Coefficients", "Preprocess", "Convergence Warnings", "Other Warnings",
      "Warnings and Messages", "Process", "HParams", "Model", "Dependent"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(Vbinomlist_list),
    c("character", "numeric", "numeric", "numeric", "numeric", "numeric",
      "numeric", "numeric", "numeric", "numeric", "numeric", "numeric",
      "numeric", "numeric", "numeric", "numeric", "numeric", "numeric",
      "list", "list", "list", "list", "list", "integer", "integer",
      "list", "list", "list", "list", "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(Vbinomlist_list),
    c("character", "double", "double", "double", "double", "double",
      "double", "double", "double", "double", "double", "double",
      "double", "double", "double", "double", "double", "double",
      "list", "list", "list", "list", "list", "integer", "integer",
      "list", "list", "list", "list", "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(Vbinomlist_list),
    c(8L, 30L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(Vbinomlist_list)),
    character(0),
    fixed = TRUE)
  ## Finished testing 'Vbinomlist_list'                                     ####

  res_hparams <- dplyr::bind_rows(Vbinomlist_list$HParams)

  ## Testing 'res_hparams'                                                  ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Testing class
  expect_equal(
    class(res_hparams),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    res_hparams[["a"]],
    c(1, 2, 1, 2, 1, 2, 1, 2),
    tolerance = 1e-4)
  expect_equal(
    res_hparams[["b"]],
    c(3, 3, 4, 4, 3, 3, 4, 4),
    tolerance = 1e-4)
  # Testing column names
  expect_equal(
    names(res_hparams),
    c("a", "b"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(res_hparams),
    c("numeric", "numeric"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(res_hparams),
    c("double", "double"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(res_hparams),
    c(8L, 2L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(res_hparams)),
    character(0),
    fixed = TRUE)
  ## Finished testing 'res_hparams'                                         ####

  warn_mess <- dplyr::bind_rows(Vbinomlist_list$`Warnings and Messages`)

  ## Testing 'warn_mess'                                                    ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Testing class
  expect_equal(
    class(warn_mess),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    warn_mess[["Function"]],
    c("model_fn", "model_fn", "model_fn", "model_fn", "model_fn", "model_fn",
      "model_fn", "model_fn"),
    fixed = TRUE)
  expect_equal(
    warn_mess[["Type"]],
    c("warning", "warning", "warning", "warning", "warning", "warning",
      "warning", "warning"),
    fixed = TRUE)
  expect_equal(
    warn_mess[["Message"]],
    c("a = 1; b = 3", "a = 2; b = 3", "a = 1; b = 4", "a = 2; b = 4",
      "a = 1; b = 3", "a = 2; b = 3", "a = 1; b = 4", "a = 2; b = 4"),
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(warn_mess),
    c("Function", "Type", "Message"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(warn_mess),
    c("character", "character", "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(warn_mess),
    c("character", "character", "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(warn_mess),
    c(8L, 3L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(warn_mess)),
    character(0),
    fixed = TRUE)
  ## Finished testing 'warn_mess'                                           ####

  preproc <- dplyr::bind_rows(Vbinomlist_list$Preprocess)

  ## Testing 'preproc'                                                      ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Testing class
  expect_equal(
    class(preproc),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    preproc[["Measure"]],
    c("Mean", "SD", "Mean", "SD", "Mean", "SD", "Mean", "SD", "Mean",
      "SD", "Mean", "SD", "Mean", "SD", "Mean", "SD"),
    fixed = TRUE)
  expect_equal(
    preproc[["age"]],
    c(29.28571, 7.93185, 29.28571, 7.93185, 29.28571, 7.93185, 29.28571,
      7.93185, 29.28571, 7.93185, 29.28571, 7.93185, 29.28571, 7.93185,
      29.28571, 7.93185),
    tolerance = 1e-4)
  expect_equal(
    preproc[["score"]],
    c(40.28571, 19.42201, 40.28571, 19.42201, 40.28571, 19.42201, 40.28571,
      19.42201, 40.28571, 19.42201, 40.28571, 19.42201, 40.28571,
      19.42201, 40.28571, 19.42201),
    tolerance = 1e-4)
  expect_equal(
    preproc[["session"]],
    c(2, 0.83666, 2, 0.83666, 2, 0.83666, 2, 0.83666, 2, 0.83666, 2,
      0.83666, 2, 0.83666, 2, 0.83666),
    tolerance = 1e-4)
  # Testing column names
  expect_equal(
    names(preproc),
    c("Measure", "age", "score", "session"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(preproc),
    c("character", "numeric", "numeric", "numeric"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(preproc),
    c("character", "double", "double", "double"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(preproc),
    c(16L, 4L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(preproc)),
    character(0),
    fixed = TRUE)
  ## Finished testing 'preproc'                                             ####

  coefs <- dplyr::bind_rows(Vbinomlist_list$Coefficients)

  ## Testing 'coefs'                                                        ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Testing class
  expect_equal(
    class(coefs),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    coefs[["term"]],
    c("(Intercept)", "score", "(Intercept)", "score", "(Intercept)",
      "score", "(Intercept)", "score", "(Intercept)", "age", "(Intercept)",
      "age", "(Intercept)", "age", "(Intercept)", "age"),
    fixed = TRUE)
  expect_equal(
    coefs[["estimate"]],
    c(0.33703, -1.04134, 0.33703, -1.04134, 0.33703, -1.04134, 0.33703,
      -1.04134, 0.28819, -0.08818, 0.28819, -0.08818, 0.28819, -0.08818,
      0.28819, -0.08818),
    tolerance = 1e-4)
  expect_equal(
    coefs[["std.error"]],
    c(0.49167, 0.56828, 0.49167, 0.56828, 0.49167, 0.56828, 0.49167,
      0.56828, 0.44139, 0.45173, 0.44139, 0.45173, 0.44139, 0.45173,
      0.44139, 0.45173),
    tolerance = 1e-4)
  expect_equal(
    coefs[["statistic"]],
    c(0.68548, -1.83244, 0.68548, -1.83244, 0.68548, -1.83244, 0.68548,
      -1.83244, 0.65292, -0.19519, 0.65292, -0.19519, 0.65292, -0.19519,
      0.65292, -0.19519),
    tolerance = 1e-4)
  expect_equal(
    coefs[["p.value"]],
    c(0.49304, 0.06689, 0.49304, 0.06689, 0.49304, 0.06689, 0.49304,
      0.06689, 0.51381, 0.84524, 0.51381, 0.84524, 0.51381, 0.84524,
      0.51381, 0.84524),
    tolerance = 1e-4)
  # Testing column names
  expect_equal(
    names(coefs),
    c("term", "estimate", "std.error", "conf.level",
      "conf.low", "conf.high", "statistic", "df.error", "p.value"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(coefs),
    c("character", "numeric", "numeric", "numeric",
      "numeric", "numeric", "numeric", "numeric", "numeric"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(coefs),
    c("character", "double", "double", "double",
      "double",  "double", "double", "double", "double"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(coefs),
    c(16L, 9L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(coefs)),
    character(0),
    fixed = TRUE)
  ## Finished testing 'coefs'                                               ####

  ## Testing 'Vbinomlist_list$`Confusion Matrix`[[1]]'                      ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Assigning output
  output_19148 <- Vbinomlist_list$`Confusion Matrix`[[1]]
  # Testing class
  expect_equal(
    class(output_19148),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_19148[["Prediction"]],
    c("0", "1", "0", "1"),
    fixed = TRUE)
  expect_equal(
    output_19148[["Target"]],
    c("0", "0", "1", "1"),
    fixed = TRUE)
  expect_equal(
    output_19148[["Pos_0"]],
    c("TP", "FN", "FP", "TN"),
    fixed = TRUE)
  expect_equal(
    output_19148[["Pos_1"]],
    c("TN", "FP", "FN", "TP"),
    fixed = TRUE)
  expect_equal(
    output_19148[["N"]],
    c(2, 1, 0, 6),
    tolerance = 1e-4)
  # Testing column names
  expect_equal(
    names(output_19148),
    c("Prediction", "Target", "Pos_0", "Pos_1", "N"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_19148),
    c("character", "character", "character", "character", "integer"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_19148),
    c("character", "character", "character", "character", "integer"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_19148),
    4:5)
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_19148)),
    character(0),
    fixed = TRUE)
  ## Finished testing 'Vbinomlist_list$`Confusion Matrix`[[1]]'             ####

  ## Testing 'Vbinomlist_list$Predictions[[1]]'                             ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Assigning output
  output_19148 <- Vbinomlist_list$Predictions[[1]]
  # Testing class
  expect_equal(
    class(output_19148),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_19148[["Observation"]],
    c(1, 2, 3, 4, 5, 6, 7, 8, 9),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Target"]],
    structure(c(1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L), .Label = c("0",
      "1"), class = "factor"))
  expect_equal(
    output_19148[["Prediction"]],
    c(0.65032, 0.45418, 0.15642, 0.84459, 0.70858, 0.58719, 0.8515,
      0.76071, 0.70858),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Predicted Class"]],
    c("1", "0", "0", "1", "1", "1", "1", "1", "1"),
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(output_19148),
    c("Observation", "Target", "Prediction", "Predicted Class"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_19148),
    c("integer", "factor", "numeric", "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_19148),
    c("integer", "integer", "double", "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_19148),
    c(9L, 4L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_19148)),
    character(0),
    fixed = TRUE)
  ## Finished testing 'Vbinomlist_list$Predictions[[1]]'                    ####


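  # Collect the glm summary of each of the eight models into a tibble:
  # `sapply(..., FUN = summary)` yields one column per model, and `t()`
  # flips it to one row per model with the summary components
  # (e.g. `aic`, `deviance`) as list-columns.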
  models <- dplyr::as_tibble(t(sapply(Vbinomlist_list$Model, FUN = summary)))
  expect_equal(
    unlist(models$aic),
    c(
      28.4526796540548, 28.4526796540548, 28.4526796540548, 28.4526796540548,
      32.6440231222318, 32.6440231222318, 32.6440231222318, 32.6440231222318
    )
  )
  expect_equal(
    unlist(models$deviance),
    c(
      24.4526796540548, 24.4526796540548, 24.4526796540548, 24.4526796540548,
      28.6440231222318, 28.6440231222318, 28.6440231222318, 28.6440231222318
    )
  )

})

test_that("fuzz testing validate_fn()", {

  testthat::skip_on_cran()

  xpectr::set_test_seed(42)

  dat_ready <- participant.scores %>%
    dplyr::mutate(diagnosis = as.factor(diagnosis)) %>%
    groupdata2::partition(
      p = 0.75,
      cat_col = "diagnosis",
      id_col = "participant",
      list_out = FALSE
  )

  glm_model_fn <- function(train_data, formula, hyperparameters) {
    glm(formula = formula, data = train_data, family = "binomial")
  }

  glm_predict_fn <- predict_functions("glm_binomial")

  # The example preprocess fn uses the `formula` argument, but we hardcode
  # the formula for this test
  glm_preprocess_fn <- function(train_data, test_data, formula, hyperparameters) {

    # Create recipes object
    recipe_object <- recipes::recipe(

      # Note: Since the formula is hardcoded here (instead of using the
      # `formula` argument), the train/test splits can be preprocessed
      # once instead of once per formula
      # Tip: Use `y ~ .` to include all predictors (where `y` is your dependent variable)
      formula = diagnosis ~ .,
      data = train_data
    ) %>%

      # Add preprocessing steps
      # Note: We could specify individual variables for each step
      # instead of selecting all numeric variables
      recipes::step_center(recipes::all_numeric()) %>%
      recipes::step_scale(recipes::all_numeric()) %>%

      # Find parameters from the training set
      recipes::prep(training = train_data)

    # Apply preprocessing to the partitions
    train_data <- recipes::bake(recipe_object, train_data)
    test_data <- recipes::bake(recipe_object, test_data)

    # Extract the preprocessing parameters
    means <- recipe_object$steps[[1]]$means
    sds <- recipe_object$steps[[2]]$sds

    # Add preprocessing parameters to a tibble
    tidy_parameters <- tibble::tibble("Measure" = c("Mean", "SD")) %>%
      dplyr::bind_cols(dplyr::bind_rows(means, sds))

    list(
      "train" = train_data,
      "test" = test_data,
      "parameters" = tidy_parameters
    )
  }

  hparams <- list(
    "a" = c(1, 2),
    "b" = c(3, 4)
  )

  # Generate expectations for 'validate_fn'
  # Tip: keep the gxs_function() call below commented out (rather than
  # deleting it) so the tests are easy to regenerate
  xpectr::set_test_seed(42)
  # xpectr::gxs_function(
  #   fn = validate_fn,
  #   args_values = list(
  #     "train_data" = list(dat_ready, NA, 1),
  #     "formulas" = list("diagnosis~score","diagnosis~score+(1|session)",
  #                       "diagnosis~", "score + age", NA, 1),
  #     "type" = list("binomial", "gaussian", "lol", NA, 1),
  #     "model_fn" = list(glm_model_fn, identity, NA, 1),
  #     "predict_fn" = list(glm_predict_fn, identity, NA, 1),
  #     "test_data" = list(NULL),
  #     "preprocess_fn" = list(glm_preprocess_fn, identity, NA, 1),
  #     "preprocess_once" = list(FALSE, TRUE, NA, 1),
  #     "hyperparameters" = list(hparams, list(), NA, 1),
  #     "partitions_col" = list(".partitions", ".nope"),
  #     "cutoff" = list(0.5, -1, 2),
  #     "positive" = list(2, "1", 1, 0, NA),
  #     "metrics" = list(list("all" = FALSE, "Accuracy" = TRUE, "Sensitivity" = TRUE), "almost all"),
  #     "rm_nc" = list(FALSE, "What?"),
  #     "parallel" = list(FALSE),
  #     "verbose" = list(FALSE, TRUE)
  #   ),
  #   indentation = 2
  # )


  ## Testing 'validate_fn'                                                    ####
  ## Initially generated by xpectr
  # Testing different combinations of argument values

  # Testing validate_fn(train_data = dat_ready, formulas...
  xpectr::set_test_seed(42)
  # Assigning output
  output_19148 <- validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE)
  # Testing class
  expect_equal(
    class(output_19148),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_19148[["Fixed"]],
    c("score", "score", "score", "score"),
    fixed = TRUE)
  expect_equal(
    output_19148[["Accuracy"]],
    c(0.44444, 0.44444, 0.44444, 0.44444),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Sensitivity"]],
    c(0.5, 0.5, 0.5, 0.5),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["ROC"]],
    c(NA, NA, NA, NA))
  expect_equal(
    output_19148[["Convergence Warnings"]],
    c(0, 0, 0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Other Warnings"]],
    c(0, 0, 0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Dependent"]],
    c("diagnosis", "diagnosis", "diagnosis", "diagnosis"),
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(output_19148),
    c("Fixed", "Accuracy", "Sensitivity", "Predictions", "ROC", "Confusion Matrix",
      "Coefficients", "Preprocess", "Convergence Warnings", "Other Warnings",
      "Warnings and Messages", "Process", "HParams", "Model", "Dependent"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_19148),
    c("character", "numeric", "numeric", "list", "logical", "list",
      "list", "list", "integer", "integer", "list", "list", "list",
      "list", "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_19148),
    c("character", "double", "double", "list", "logical", "list", "list",
      "list", "integer", "integer", "list", "list", "list", "list",
      "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_19148),
    c(4L, 15L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_19148)),
    character(0),
    fixed = TRUE)

  # Testing validate_fn(train_data = NA, formulas = "dia...
  # Changed from baseline: train_data = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19370 <- xpectr::capture_side_effects(validate_fn(train_data = NA, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19370[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'train_data': Must be of type 'data.frame', not 'logical'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19370[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = 1, formulas = "diag...
  # Changed from baseline: train_data = 1
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_12861 <- xpectr::capture_side_effects(validate_fn(train_data = 1, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_12861[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'train_data': Must be of type 'data.frame', not 'double'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_12861[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = NULL, formulas = "d...
  # Changed from baseline: train_data = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_18304 <- xpectr::capture_side_effects(validate_fn(train_data = NULL, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18304[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'train_data': Must be of type 'data.frame', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18304[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: formulas = "diagnosis...
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_16417 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score+(1|session)", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
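  # R >= 4.3 no longer throws the rank-deficiency warnings here
  # (see the note further down), hence the version guard.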
  if (!is_newer_R_version(4, 2)) {
    for (w in side_effects_16417[['warnings']]) {
      expect_match(xpectr::strip(w), "rank[[:space:]]*deficient", fixed = FALSE)
    }
  }

  # Newer versions also emit messages here, but testing them is not important.
  # expect_equal(
  #   xpectr::strip(side_effects_16417[['messages']]),
  #   xpectr::strip(character(0)),
  #   fixed = TRUE
  # )
  # Assigning output
  output_16417 <- xpectr::suppress_mw(validate_fn(train_data = dat_ready, formulas = "diagnosis~score+(1|session)", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE))
  # Testing class
  expect_equal(
    class(output_16417),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_16417[["Fixed"]],
    c("score", "score", "score", "score"),
    fixed = TRUE)
  expect_equal(
    output_16417[["Accuracy"]],
    c(0.44444, 0.44444, 0.44444, 0.44444),
    tolerance = 1e-4)
  expect_equal(
    output_16417[["Sensitivity"]],
    c(0.5, 0.5, 0.5, 0.5),
    tolerance = 1e-4)
  expect_equal(
    output_16417[["ROC"]],
    c(NA, NA, NA, NA))
  expect_equal(
    output_16417[["Convergence Warnings"]],
    c(0, 0, 0, 0),
    tolerance = 1e-4)
  if (!is_newer_R_version(4, 2)) {
    expect_equal( # Does not throw these warnings in 4.3
      output_16417[["Other Warnings"]],
      c(1, 1, 1, 1),
      tolerance = 1e-4)
  }
  expect_equal(
    output_16417[["Dependent"]],
    c("diagnosis", "diagnosis", "diagnosis", "diagnosis"),
    fixed = TRUE)
  expect_equal(
    output_16417[["Random"]],
    c("(1|session)", "(1|session)", "(1|session)", "(1|session)"),
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(output_16417),
    c("Fixed", "Accuracy", "Sensitivity", "Predictions", "ROC", "Confusion Matrix",
      "Coefficients", "Preprocess", "Convergence Warnings", "Other Warnings",
      "Warnings and Messages", "Process", "HParams", "Model", "Dependent",
      "Random"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_16417),
    c("character", "numeric", "numeric", "list", "logical", "list",
      "list", "list", "integer", "integer", "list", "list", "list",
      "list", "character", "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_16417),
    c("character", "double", "double", "list", "logical", "list", "list",
      "list", "integer", "integer", "list", "list", "list", "list",
      "character", "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_16417),
    c(4L, 16L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_16417)),
    character(0),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: formulas = "diagnosis~"
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_15190 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_15190[['error']]),
    xpectr::strip("The model formula does not contain a dependent variable."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_15190[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: formulas = "score + age"
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_17365 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score + age", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_17365[['error']]),
    xpectr::strip("The model formula does not contain a dependent variable."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_17365[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: formulas = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_11346 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = NA, type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_11346[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'formulas': Contains missing values (element 1)."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_11346[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: formulas = 1
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_16569 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = 1, type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16569[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'formulas': Must be of type 'character', not 'double'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16569[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: formulas = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_17050 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = NULL, type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_17050[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'formulas': Must be of type 'character', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_17050[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: type = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_14577 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = NA, model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14577[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'family/type': Must be element of set {'gaussian','binomial','multinomial'}, but is\n * 'NA'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14577[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: type = 1
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_17191 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = 1, model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_17191[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'family/type': Must be element of set {'gaussian','binomial','multinomial'}, but\n * types do not match (numeric != character)."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_17191[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: type = "gaussian"
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19346 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "gaussian", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19346[['error']]),
    xpectr::strip("'metrics_list' contained unknown metric names: Accuracy, Sensitivity."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19346[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: type = "lol"
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_12554 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "lol", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_12554[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'family/type': Must be element of set {'gaussian','binomial','multinomial'}, but is\n * 'lol'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_12554[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: type = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_14622 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = NULL, model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14622[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'family/type': Must be a subset of {'gaussian','binomial','multinomial'}, not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14622[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: model_fn = identity
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19400 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = identity, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_match(
    xpectr::strip(side_effects_19400[['error']], lowercase = TRUE),
    xpectr::strip(paste0("Must be a identical to\n *",
                         ifelse(is_checkmate_v2_1(), " set", ""),
                         " (train_data,formula,hyperparameters)."), lowercase = TRUE), # model_fn argument names
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19400[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: model_fn = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19782 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = NA, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19782[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'model_fn': Must be a function, not 'logical'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19782[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: model_fn = 1
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_11174 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = 1, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_11174[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'model_fn': Must be a function, not 'double'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_11174[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: model_fn = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_14749 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = NULL, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14749[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'model_fn': Must be a function, not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14749[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: predict_fn = identity
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_15603 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = identity, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_match(
    xpectr::strip(side_effects_15603[['error']], lowercase = TRUE),
    xpectr::strip(paste0("must be a identical to\n * ",
                         ifelse(is_checkmate_v2_1(), "set", ""),
                         " (test_data,model,formula,hyperpa",
                         "rameters,train_data)."), lowercase = TRUE), # predict_fn argument names
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_15603[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: predict_fn = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19040 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = NA, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19040[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'predict_fn': Must be a function, not 'logical'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19040[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: predict_fn = 1
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_11387 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = 1, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_11387[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'predict_fn': Must be a function, not 'double'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_11387[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: predict_fn = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19888 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = NULL, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19888[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'predict_fn': Must be a function, not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19888[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: preprocess_fn = identity
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19466 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = identity, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_match(
    xpectr::strip(side_effects_19466[['error']], lowercase = TRUE),
    xpectr::strip(paste0("must be a identical to\n *",
                         ifelse(is_checkmate_v2_1(), "set", ""),
                         " (train_data,test_data,formula,hy",
                         "perparameters)."), lowercase = TRUE), # preprocess_fn argument names
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19466[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)
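
  # The assertion above encodes the required `preprocess_fn` signature: its
  # argument names must be exactly (train_data, test_data, formula,
  # hyperparameters). A minimal conforming sketch (hypothetical helper, not
  # used below; the list("train", "test") return shape is an assumption
  # based on the cvms documentation):
  noop_preprocess_fn <- function(train_data, test_data, formula, hyperparameters) {
    list("train" = train_data, "test" = test_data)
  }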

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: preprocess_fn = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_10824 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = NA, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_10824[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'preprocess_fn': Must be a function (or 'NULL'), not 'logical'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_10824[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: preprocess_fn = 1
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_15142 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = 1, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_15142[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'preprocess_fn': Must be a function (or 'NULL'), not 'double'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_15142[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: preprocess_fn = NULL
  xpectr::set_test_seed(42)
  # Assigning output
  output_13902 <- validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE)
  # Testing class
  expect_equal(
    class(output_13902),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_13902[["Fixed"]],
    c("score", "score", "score", "score"),
    fixed = TRUE)
  expect_equal(
    output_13902[["Accuracy"]],
    c(0.44444, 0.44444, 0.44444, 0.44444),
    tolerance = 1e-4)
  expect_equal(
    output_13902[["Sensitivity"]],
    c(0.5, 0.5, 0.5, 0.5),
    tolerance = 1e-4)
  expect_equal(
    output_13902[["ROC"]],
    c(NA, NA, NA, NA))
  expect_equal(
    output_13902[["Convergence Warnings"]],
    c(0, 0, 0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_13902[["Other Warnings"]],
    c(0, 0, 0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_13902[["Dependent"]],
    c("diagnosis", "diagnosis", "diagnosis", "diagnosis"),
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(output_13902),
    c("Fixed", "Accuracy", "Sensitivity", "Predictions", "ROC", "Confusion Matrix",
      "Coefficients", "Convergence Warnings", "Other Warnings", "Warnings and Messages",
      "Process", "HParams", "Model", "Dependent"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_13902),
    c("character", "numeric", "numeric", "list", "logical", "list",
      "list", "integer", "integer", "list", "list", "list", "list",
      "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_13902),
    c("character", "double", "double", "list", "logical", "list", "list",
      "integer", "integer", "list", "list", "list", "list", "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_13902),
    c(4L, 14L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_13902)),
    character(0),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: preprocess_once = TRUE
  xpectr::set_test_seed(42)
  # Assigning output
  output_19057 <- validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = TRUE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE)
  # Testing class
  expect_equal(
    class(output_19057),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_19057[["Fixed"]],
    c("score", "score", "score", "score"),
    fixed = TRUE)
  expect_equal(
    output_19057[["Accuracy"]],
    c(0.44444, 0.44444, 0.44444, 0.44444),
    tolerance = 1e-4)
  expect_equal(
    output_19057[["Sensitivity"]],
    c(0.5, 0.5, 0.5, 0.5),
    tolerance = 1e-4)
  expect_equal(
    output_19057[["ROC"]],
    c(NA, NA, NA, NA))
  expect_equal(
    output_19057[["Convergence Warnings"]],
    c(0, 0, 0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_19057[["Other Warnings"]],
    c(0, 0, 0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_19057[["Dependent"]],
    c("diagnosis", "diagnosis", "diagnosis", "diagnosis"),
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(output_19057),
    c("Fixed", "Accuracy", "Sensitivity", "Predictions", "ROC", "Confusion Matrix",
      "Coefficients", "Preprocess", "Convergence Warnings", "Other Warnings",
      "Warnings and Messages", "Process", "HParams", "Model", "Dependent"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_19057),
    c("character", "numeric", "numeric", "list", "logical", "list",
      "list", "list", "integer", "integer", "list", "list", "list",
      "list", "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_19057),
    c("character", "double", "double", "list", "logical", "list", "list",
      "list", "integer", "integer", "list", "list", "list", "list",
      "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_19057),
    c(4L, 15L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_19057)),
    character(0),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: preprocess_once = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_14469 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = NA, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14469[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'preprocess_once': May not be NA."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14469[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: preprocess_once = 1
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_18360 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = 1, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18360[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'preprocess_once': Must be of type 'logical flag', not 'double'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18360[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: preprocess_once = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_17375 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = NULL, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_17375[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'preprocess_once': Must be of type 'logical flag', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_17375[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: hyperparameters = list()
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_18110 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = list(), partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18110[['error']]),
    xpectr::strip(paste0("Assertion failed. One of the following must apply:\n * chec",
                         "kmate::check_data_frame(hyperparameters): Must be of type 'd",
                         "ata.frame' (or 'NULL'), not 'list'\n * checkmate::check_list",
                         "(hyperparameters): Must have length >= 1, but has length 0")),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18110[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: hyperparameters = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_13881 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = NA, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_13881[['error']]),
    xpectr::strip(paste0("Assertion failed. One of the following must apply:\n * chec",
                         "kmate::check_data_frame(hyperparameters): Must be of type 'd",
                         "ata.frame' (or 'NULL'), not 'logical'\n * checkmate::check_l",
                         "ist(hyperparameters): Must be of type 'list' (or 'NULL'), no",
                         "t 'logical'")),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_13881[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: hyperparameters = 1
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_16851 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = 1, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16851[['error']]),
    xpectr::strip(paste0("Assertion failed. One of the following must apply:\n * chec",
                         "kmate::check_data_frame(hyperparameters): Must be of type 'd",
                         "ata.frame' (or 'NULL'), not 'double'\n * checkmate::check_li",
                         "st(hyperparameters): Must be of type 'list' (or 'NULL'), not",
                         " 'double'")),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16851[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)
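
  # Per the assertions above, `hyperparameters` must be either a named list
  # of value vectors or a data frame (assumption: one row per hyperparameter
  # combination). A sketch of both accepted forms (hypothetical objects,
  # not used below):
  hparams_as_list <- list("a" = c(1, 2), "b" = c(3))
  hparams_as_df <- expand.grid(a = c(1, 2), b = 3)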

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: hyperparameters = NULL
  xpectr::set_test_seed(42)
  # Assigning output
  output_10039 <- validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = NULL, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE)
  # Testing class
  expect_equal(
    class(output_10039),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_10039[["Fixed"]],
    "score",
    fixed = TRUE)
  expect_equal(
    output_10039[["Accuracy"]],
    0.44444,
    tolerance = 1e-4)
  expect_equal(
    output_10039[["Sensitivity"]],
    0.5,
    tolerance = 1e-4)
  expect_equal(
    output_10039[["ROC"]],
    NA)
  expect_equal(
    output_10039[["Convergence Warnings"]],
    0,
    tolerance = 1e-4)
  expect_equal(
    output_10039[["Other Warnings"]],
    0,
    tolerance = 1e-4)
  expect_equal(
    output_10039[["Dependent"]],
    "diagnosis",
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(output_10039),
    c("Fixed", "Accuracy", "Sensitivity", "Predictions", "ROC", "Confusion Matrix",
      "Coefficients", "Preprocess", "Convergence Warnings", "Other Warnings",
      "Warnings and Messages", "Process", "Model", "Dependent"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_10039),
    c("character", "numeric", "numeric", "list", "logical", "list",
      "list", "list", "integer", "integer", "list", "list", "list",
      "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_10039),
    c("character", "double", "double", "list", "logical", "list", "list",
      "list", "integer", "integer", "list", "list", "list", "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_10039),
    c(1L, 14L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_10039)),
    character(0),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: partitions_col = ".nope"
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_18329 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".nope", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18329[['error']]),
    xpectr::strip("1 assertions failed:\n * Could not find 'partition_col' column in 'train_data'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18329[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: partitions_col = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_10073 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = NULL, cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_10073[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'partitions_col': Must be of type 'string', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_10073[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: cutoff = -1
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_12076 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = -1, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_12076[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'cutoff': Element 1 is not >= 0."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_12076[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: cutoff = 2
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19066 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 2, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19066[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'cutoff': Element 1 is not <= 1."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19066[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: cutoff = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_16117 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = NULL, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16117[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'cutoff': Must be of type 'number', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16117[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: positive = "1"
  xpectr::set_test_seed(42)
  # Assigning output
  output_13795 <- validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = "1", metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE)
  # Testing class
  expect_equal(
    class(output_13795),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_13795[["Fixed"]],
    c("score", "score", "score", "score"),
    fixed = TRUE)
  expect_equal(
    output_13795[["Accuracy"]],
    c(0.44444, 0.44444, 0.44444, 0.44444),
    tolerance = 1e-4)
  expect_equal(
    output_13795[["Sensitivity"]],
    c(0.5, 0.5, 0.5, 0.5),
    tolerance = 1e-4)
  expect_equal(
    output_13795[["ROC"]],
    c(NA, NA, NA, NA))
  expect_equal(
    output_13795[["Convergence Warnings"]],
    c(0, 0, 0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_13795[["Other Warnings"]],
    c(0, 0, 0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_13795[["Dependent"]],
    c("diagnosis", "diagnosis", "diagnosis", "diagnosis"),
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(output_13795),
    c("Fixed", "Accuracy", "Sensitivity", "Predictions", "ROC", "Confusion Matrix",
      "Coefficients", "Preprocess", "Convergence Warnings", "Other Warnings",
      "Warnings and Messages", "Process", "HParams", "Model", "Dependent"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_13795),
    c("character", "numeric", "numeric", "list", "logical", "list",
      "list", "list", "integer", "integer", "list", "list", "list",
      "list", "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_13795),
    c("character", "double", "double", "list", "logical", "list", "list",
      "list", "integer", "integer", "list", "list", "list", "list",
      "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_13795),
    c(4L, 15L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_13795)),
    character(0),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: positive = 1
  xpectr::set_test_seed(42)
  # Assigning output
  output_14357 <- validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 1, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE)
  # Testing class
  expect_equal(
    class(output_14357),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_14357[["Fixed"]],
    c("score", "score", "score", "score"),
    fixed = TRUE)
  expect_equal(
    output_14357[["Accuracy"]],
    c(0.44444, 0.44444, 0.44444, 0.44444),
    tolerance = 1e-4)
  expect_equal(
    output_14357[["Sensitivity"]],
    c(0.33333, 0.33333, 0.33333, 0.33333),
    tolerance = 1e-4)
  expect_equal(
    output_14357[["ROC"]],
    c(NA, NA, NA, NA))
  expect_equal(
    output_14357[["Convergence Warnings"]],
    c(0, 0, 0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_14357[["Other Warnings"]],
    c(0, 0, 0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_14357[["Dependent"]],
    c("diagnosis", "diagnosis", "diagnosis", "diagnosis"),
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(output_14357),
    c("Fixed", "Accuracy", "Sensitivity", "Predictions", "ROC", "Confusion Matrix",
      "Coefficients", "Preprocess", "Convergence Warnings", "Other Warnings",
      "Warnings and Messages", "Process", "HParams", "Model", "Dependent"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_14357),
    c("character", "numeric", "numeric", "list", "logical", "list",
      "list", "list", "integer", "integer", "list", "list", "list",
      "list", "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_14357),
    c("character", "double", "double", "list", "logical", "list", "list",
      "list", "integer", "integer", "list", "list", "list", "list",
      "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_14357),
    c(4L, 15L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_14357)),
    character(0),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: positive = 0
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_10374 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 0, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_10374[['error']]),
    xpectr::strip("Assertion failed. One of the following must apply:\n * checkmate::check_choice(positive): Must be element of set {'1','2'}, but is '0'\n * checkmate::check_string(positive): Must be of type 'string', not 'double'"),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_10374[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: positive = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19735 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = NA, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19735[['error']]),
    xpectr::strip("Assertion failed. One of the following must apply:\n * checkmate::check_choice(positive): Must be element of set {'1','2'}, but is 'NA'\n * checkmate::check_string(positive): May not be NA"),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19735[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: positive = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_14317 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = NULL, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14317[['error']]),
    xpectr::strip("Assertion failed. One of the following must apply:\n * checkmate::check_choice(positive): Must be a subset of {'1','2'}, not 'NULL'\n * checkmate::check_string(positive): Must be of type 'string', not 'NULL'"),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14317[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: metrics = "almost all"
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19575 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = "almost all", rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19575[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'metrics': Must be of type 'list', not 'character'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19575[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: metrics = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_18877 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = NULL, rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18877[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'metrics': Must be of type 'list', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18877[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: rm_nc = "What?"
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_16399 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = "What?", parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16399[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'rm_nc': Must be of type 'logical flag', not 'character'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16399[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: rm_nc = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19709 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = NULL, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19709[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'rm_nc': Must be of type 'logical flag', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19709[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: parallel = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_16188 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = NULL, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16188[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'parallel': Must be of type 'logical flag', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16188[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: verbose = TRUE
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_13334 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = TRUE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_13334[['warnings']]),
    xpectr::strip(character(0)),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_13334[['messages']]),
    xpectr::strip("Will validate 4 models.\n"),
    fixed = TRUE)
  # Assigning output
  output_13334 <- xpectr::suppress_mw(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = TRUE))
  # Testing class
  expect_equal(
    class(output_13334),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_13334[["Fixed"]],
    c("score", "score", "score", "score"),
    fixed = TRUE)
  expect_equal(
    output_13334[["Accuracy"]],
    c(0.44444, 0.44444, 0.44444, 0.44444),
    tolerance = 1e-4)
  expect_equal(
    output_13334[["Sensitivity"]],
    c(0.5, 0.5, 0.5, 0.5),
    tolerance = 1e-4)
  expect_equal(
    output_13334[["ROC"]],
    c(NA, NA, NA, NA))
  expect_equal(
    output_13334[["Convergence Warnings"]],
    c(0, 0, 0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_13334[["Other Warnings"]],
    c(0, 0, 0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_13334[["Dependent"]],
    c("diagnosis", "diagnosis", "diagnosis", "diagnosis"),
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(output_13334),
    c("Fixed", "Accuracy", "Sensitivity", "Predictions", "ROC", "Confusion Matrix",
      "Coefficients", "Preprocess", "Convergence Warnings", "Other Warnings",
      "Warnings and Messages", "Process", "HParams", "Model", "Dependent"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_13334),
    c("character", "numeric", "numeric", "list", "logical", "list",
      "list", "list", "integer", "integer", "list", "list", "list",
      "list", "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_13334),
    c("character", "double", "double", "list", "logical", "list", "list",
      "list", "integer", "integer", "list", "list", "list", "list",
      "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_13334),
    c(4L, 15L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_13334)),
    character(0),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: verbose = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_13467 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "diagnosis~score", type = "binomial", model_fn = glm_model_fn, predict_fn = glm_predict_fn, test_data = NULL, preprocess_fn = glm_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", cutoff = 0.5, positive = 2, metrics = list(all = FALSE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = NULL), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_13467[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'verbose': Must be of type 'logical flag', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_13467[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  ## Finished testing 'validate_fn'                                           ####
  #

})

test_that("fuzz testing gaussian lm model with validate_fn()",{

  testthat::skip_on_cran()

  # Load data and fold it
  xpectr::set_test_seed(1)
  dat_ready <- participant.scores %>%
    dplyr::mutate(diagnosis = as.factor(diagnosis)) %>%
    groupdata2::partition(p = 0.75,
                          cat_col = "diagnosis",
                          id_col = "participant",
                          list_out = FALSE)

  lm_model_fn <- function(train_data, formula, hyperparameters) {
    lm(formula = formula, data = train_data)
  }

  lm_predict_fn <- predict_functions("lm")

  # Generate expectations for 'validate_fn'
  # Tip: keep the gxs_function() call below commented out (rather than
  # deleting it), so it is easy to regenerate the tests
  xpectr::set_test_seed(42)
  # xpectr::gxs_function(
  #   fn = validate_fn,
  #   args_values = list(
  #     "train_data" = list(dat_ready, participant.scores, NA, 1),
  #     "formulas" = list("score ~ diagnosis", "score ~ diagnosis + (1|session)", "~", "score ~ nope", NA, 1),
  #     "type" = list("gaussian", "nope", 1),
  #     "model_fn" = list(lm_model_fn, identity, NA),
  #     "predict_fn" = list(lm_predict_fn, identity, NA),
  #     "test_data" = list(NULL),
  #     "preprocess_fn" = list(NULL),
  #     "preprocess_once" = list(FALSE, TRUE, NA),
  #     "hyperparameters" = list(list("a" = c(1,2), "b" = c(2)), list(), character(3), NA, 1),
  #     "partitions_col" = list(".partitions", ".nope", 1, NA),
  #     "metrics" = list(list("all" = FALSE, "RMSE" = TRUE)),
  #     "rm_nc" = list(FALSE),
  #     "parallel" = list(FALSE),
  #     "verbose" = list(FALSE, TRUE)
  #   ),
  #   indentation = 2
  # )
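
  # To regenerate the expectations below, uncomment the gxs_function() call
  # above and run it interactively (how the generated tests are delivered,
  # e.g. printed or inserted, depends on the xpectr setup).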


  ## Testing 'validate_fn'                                                    ####
  ## Initially generated by xpectr
  # Testing different combinations of argument values

  # Testing validate_fn(train_data = dat_ready, formulas...
  xpectr::set_test_seed(42)
  # Assigning output
  output_19148 <- validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE)
  # Testing class
  expect_equal(
    class(output_19148),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_19148[["Fixed"]],
    c("diagnosis", "diagnosis"),
    fixed = TRUE)
  expect_equal(
    output_19148[["RMSE"]],
    c(14.32077, 14.32077),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Convergence Warnings"]],
    c(0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Other Warnings"]],
    c(0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Dependent"]],
    c("score", "score"),
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(output_19148),
    c("Fixed", "RMSE", "Predictions", "Coefficients", "Convergence Warnings",
      "Other Warnings", "Warnings and Messages", "Process", "HParams",
      "Model", "Dependent"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_19148),
    c("character", "numeric", "list", "list", "integer", "integer",
      "list", "list", "list", "list", "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_19148),
    c("character", "double", "list", "list", "integer", "integer", "list",
      "list", "list", "list", "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_19148),
    c(2L, 11L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_19148)),
    character(0),
    fixed = TRUE)

  # Testing validate_fn(train_data = participant.scores,...
  # Changed from baseline: train_data = particip...
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19370 <- xpectr::capture_side_effects(validate_fn(train_data = participant.scores, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19370[['error']]),
    xpectr::strip("1 assertions failed:\n * Could not find 'partition_col' column in 'train_data'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19370[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = NA, formulas = "sco...
  # Changed from baseline: train_data = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_12861 <- xpectr::capture_side_effects(validate_fn(train_data = NA, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_12861[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'train_data': Must be of type 'data.frame', not 'logical'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_12861[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = 1, formulas = "scor...
  # Changed from baseline: train_data = 1
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_18304 <- xpectr::capture_side_effects(validate_fn(train_data = 1, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18304[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'train_data': Must be of type 'data.frame', not 'double'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18304[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = NULL, formulas = "s...
  # Changed from baseline: train_data = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_16417 <- xpectr::capture_side_effects(validate_fn(train_data = NULL, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16417[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'train_data': Must be of type 'data.frame', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16417[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: formulas = "score ~ d...
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_15190 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis + (1|session)", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  if (!is_newer_R_version(4, 2)) {
    for (w in side_effects_15190[['warnings']]) {
      expect_match(xpectr::strip(w), "rank[[:space:]]*deficient", fixed = FALSE)
    }
  }
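
  # Note: lm() has no notion of lme4-style random effects, so `(1|session)`
  # is evaluated as the logical expression `1 | session` (a constant), which
  # aliases with the intercept; on older R versions this surfaced as a
  # rank-deficiency warning.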

  # Newer R versions emit messages for this call, so the message
  # expectation is disabled; it is not important enough to test here.
  # expect_equal(
  #   xpectr::strip(side_effects_15190[['messages']]),
  #   xpectr::strip(character(0)),
  #   fixed = TRUE)
  # Assigning output
  output_15190 <- xpectr::suppress_mw(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis + (1|session)", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE))
  # Testing class
  expect_equal(
    class(output_15190),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_15190[["Fixed"]],
    c("diagnosis", "diagnosis"),
    fixed = TRUE)
  expect_equal(
    output_15190[["RMSE"]],
    c(14.32077, 14.32077),
    tolerance = 1e-4)
  expect_equal(
    output_15190[["Convergence Warnings"]],
    c(0, 0),
    tolerance = 1e-4)
  if (!is_newer_R_version(4, 2)) {
    expect_equal( # Does not throw these warnings in 4.3
      output_15190[["Other Warnings"]],
      c(1, 1),
      tolerance = 1e-4)
  }
  expect_equal(
    output_15190[["Dependent"]],
    c("score", "score"),
    fixed = TRUE)
  expect_equal(
    output_15190[["Random"]],
    c("(1|session)", "(1|session)"),
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(output_15190),
    c("Fixed", "RMSE", "Predictions", "Coefficients", "Convergence Warnings",
      "Other Warnings", "Warnings and Messages", "Process", "HParams",
      "Model", "Dependent", "Random"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_15190),
    c("character", "numeric", "list", "list", "integer", "integer",
      "list", "list", "list", "list", "character", "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_15190),
    c("character", "double", "list", "list", "integer", "integer", "list",
      "list", "list", "list", "character", "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_15190),
    c(2L, 12L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_15190)),
    character(0),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: formulas = "~"
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_17365 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "~", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_17365[['error']]),
    xpectr::strip("The model formula does not contain a dependent variable."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_17365[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: formulas = "score ~ n...
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_11346 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score ~ nope", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_match(
    xpectr::strip(side_effects_11346[['error']]),
    xpectr::strip("object 'nope' not found\n\nFor:\nFormula: score ~ nope\nFold column: .partitions\nFold: 2\nHyperparameters: a : 1, b : 2\n"),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_11346[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: formulas = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_16569 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = NA, type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16569[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'formulas': Contains missing values (element 1)."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16569[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: formulas = 1
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_17050 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = 1, type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_17050[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'formulas': Must be of type 'character', not 'double'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_17050[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: formulas = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_14577 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = NULL, type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14577[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'formulas': Must be of type 'character', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14577[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: type = 1
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_17191 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = 1, model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_17191[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'family/type': Must be element of set {'gaussian','binomial','multinomial'}, but\n * types do not match (numeric != character)."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_17191[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: type = "nope"
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19346 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "nope", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19346[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'family/type': Must be element of set {'gaussian','binomial','multinomial'}, but is\n * 'nope'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19346[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: type = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_12554 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = NULL, model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_12554[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'family/type': Must be a subset of {'gaussian','binomial','multinomial'}, not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_12554[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: model_fn = identity
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_14622 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "gaussian", model_fn = identity, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_match(
    xpectr::strip(side_effects_14622[['error']], lowercase = TRUE),
    xpectr::strip(paste0("Must be a identical to\n * ",
                         ifelse(is_checkmate_v2_1(), "set ", ""),
                         "(train_data,formula,hyperparameters)."), lowercase = TRUE), # model_fn argument names
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14622[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: model_fn = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19400 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "gaussian", model_fn = NA, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19400[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'model_fn': Must be a function, not 'logical'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19400[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: model_fn = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19782 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "gaussian", model_fn = NULL, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19782[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'model_fn': Must be a function, not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19782[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: predict_fn = identity
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_11174 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = identity, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_match(
    xpectr::strip(side_effects_11174[['error']], lowercase = TRUE),
    xpectr::strip(paste0("Must be a identical to\n * ",
                         ifelse(is_checkmate_v2_1(), "set ", ""),
                         "(test_data,model,formula,hyperparameters,train_data)."), lowercase = TRUE),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_11174[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: predict_fn = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_14749 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = NA, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14749[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'predict_fn': Must be a function, not 'logical'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14749[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: predict_fn = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_15603 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = NULL, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_15603[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'predict_fn': Must be a function, not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_15603[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: preprocess_once = TRUE
  xpectr::set_test_seed(42)
  # Assigning output
  output_19040 <- validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = TRUE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE)
  # Testing class
  expect_equal(
    class(output_19040),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_19040[["Fixed"]],
    c("diagnosis", "diagnosis"),
    fixed = TRUE)
  expect_equal(
    output_19040[["RMSE"]],
    c(14.32077, 14.32077),
    tolerance = 1e-4)
  expect_equal(
    output_19040[["Convergence Warnings"]],
    c(0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_19040[["Other Warnings"]],
    c(0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_19040[["Dependent"]],
    c("score", "score"),
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(output_19040),
    c("Fixed", "RMSE", "Predictions", "Coefficients", "Convergence Warnings",
      "Other Warnings", "Warnings and Messages", "Process", "HParams",
      "Model", "Dependent"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_19040),
    c("character", "numeric", "list", "list", "integer", "integer",
      "list", "list", "list", "list", "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_19040),
    c("character", "double", "list", "list", "integer", "integer", "list",
      "list", "list", "list", "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_19040),
    c(2L, 11L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_19040)),
    character(0),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: preprocess_once = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_11387 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = NA, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_11387[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'preprocess_once': May not be NA."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_11387[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: preprocess_once = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19888 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = NULL, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19888[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'preprocess_once': Must be of type 'logical flag', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19888[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: hyperparameters = list()
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19466 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19466[['error']]),
    xpectr::strip(paste0("Assertion failed. One of the following must apply:\n * chec",
                         "kmate::check_data_frame(hyperparameters): Must be of type 'd",
                         "ata.frame' (or 'NULL'), not 'list'\n * checkmate::check_list",
                         "(hyperparameters): Must have length >= 1, but has length 0")),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19466[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: hyperparameters = cha...
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_10824 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = character(3), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_10824[['error']]),
    xpectr::strip(paste0("Assertion failed. One of the following must apply:\n * chec",
                         "kmate::check_data_frame(hyperparameters): Must be of type 'd",
                         "ata.frame' (or 'NULL'), not 'character'\n * checkmate::check",
                         "_list(hyperparameters): Must be of type 'list' (or 'NULL'), ",
                         "not 'character'")),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_10824[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: hyperparameters = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_15142 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = NA, partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_15142[['error']]),
    xpectr::strip(paste0("Assertion failed. One of the following must apply:\n * chec",
                         "kmate::check_data_frame(hyperparameters): Must be of type 'd",
                         "ata.frame' (or 'NULL'), not 'logical'\n * checkmate::check_l",
                         "ist(hyperparameters): Must be of type 'list' (or 'NULL'), no",
                         "t 'logical'")),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_15142[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: hyperparameters = 1
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_13902 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = 1, partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_13902[['error']]),
    xpectr::strip(paste0("Assertion failed. One of the following must apply:\n * chec",
                         "kmate::check_data_frame(hyperparameters): Must be of type 'd",
                         "ata.frame' (or 'NULL'), not 'double'\n * checkmate::check_li",
                         "st(hyperparameters): Must be of type 'list' (or 'NULL'), not",
                         " 'double'")),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_13902[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: hyperparameters = NULL
  xpectr::set_test_seed(42)
  # Assigning output
  output_19057 <- validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = NULL, partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE)
  # Testing class
  expect_equal(
    class(output_19057),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_19057[["Fixed"]],
    "diagnosis",
    fixed = TRUE)
  expect_equal(
    output_19057[["RMSE"]],
    14.32077,
    tolerance = 1e-4)
  expect_equal(
    output_19057[["Convergence Warnings"]],
    0,
    tolerance = 1e-4)
  expect_equal(
    output_19057[["Other Warnings"]],
    0,
    tolerance = 1e-4)
  expect_equal(
    output_19057[["Dependent"]],
    "score",
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(output_19057),
    c("Fixed", "RMSE", "Predictions", "Coefficients", "Convergence Warnings",
      "Other Warnings", "Warnings and Messages", "Process", "Model",
      "Dependent"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_19057),
    c("character", "numeric", "list", "list", "integer", "integer",
      "list", "list", "list", "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_19057),
    c("character", "double", "list", "list", "integer", "integer", "list",
      "list", "list", "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_19057),
    c(1L, 10L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_19057)),
    character(0),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: partitions_col = ".nope"
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_14469 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".nope", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14469[['error']]),
    xpectr::strip("1 assertions failed:\n * Could not find 'partition_col' column in 'train_data'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14469[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: partitions_col = 1
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_18360 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = 1, metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18360[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'partitions_col': Must be of type 'string', not 'double'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18360[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: partitions_col = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_17375 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = NA, metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_17375[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'partitions_col': May not be NA."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_17375[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: partitions_col = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_18110 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = NULL, metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18110[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'partitions_col': Must be of type 'string', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18110[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: metrics = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_13881 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = NULL, rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_13881[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'metrics': Must be of type 'list', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_13881[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: rm_nc = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_16851 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = NULL, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16851[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'rm_nc': Must be of type 'logical flag', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16851[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: parallel = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_10039 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = NULL, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_10039[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'parallel': Must be of type 'logical flag', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_10039[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: verbose = TRUE
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_18329 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = TRUE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18329[['warnings']]),
    xpectr::strip(character(0)),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18329[['messages']]),
    xpectr::strip("Will validate 2 models.\n"),
    fixed = TRUE)
  # Assigning output
  output_18329 <- xpectr::suppress_mw(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = TRUE))
  # Testing class
  expect_equal(
    class(output_18329),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_18329[["Fixed"]],
    c("diagnosis", "diagnosis"),
    fixed = TRUE)
  expect_equal(
    output_18329[["RMSE"]],
    c(14.32077, 14.32077),
    tolerance = 1e-4)
  expect_equal(
    output_18329[["Convergence Warnings"]],
    c(0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_18329[["Other Warnings"]],
    c(0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_18329[["Dependent"]],
    c("score", "score"),
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(output_18329),
    c("Fixed", "RMSE", "Predictions", "Coefficients", "Convergence Warnings",
      "Other Warnings", "Warnings and Messages", "Process", "HParams",
      "Model", "Dependent"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_18329),
    c("character", "numeric", "list", "list", "integer", "integer",
      "list", "list", "list", "list", "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_18329),
    c("character", "double", "list", "list", "integer", "integer", "list",
      "list", "list", "list", "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_18329),
    c(2L, 11L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_18329)),
    character(0),
    fixed = TRUE)

  # Testing validate_fn(train_data = dat_ready, formulas...
  # Changed from baseline: verbose = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_10073 <- xpectr::capture_side_effects(validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = NULL), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_10073[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'verbose': Must be of type 'logical flag', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_10073[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  ## Finished testing 'validate_fn'                                           ####
  #

})

test_that("fuzz testing multinomial nnet model with validate_fn()", {

  testthat::skip_on_cran()
  testthat::skip_if_not_installed("nnet")

  # Load data and fold it
  xpectr::set_test_seed(1)

  # Create and fold dataset
  data_mc <- multiclass_probability_tibble(
    num_classes = 3,
    num_observations = 50,
    apply_softmax = TRUE,
    FUN = runif,
    class_name = "predictor_"
  )
  class_names <- paste0("class_", c(1, 2, 3))
  data_mc[["target"]] <- factor(sample(x = class_names,
                                       size = 50, replace = TRUE))
  data_mc <- groupdata2::partition(data_mc, p = 0.70, cat_col = "target", list_out = FALSE)
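  # With list_out = FALSE, partition() keeps one data frame and adds a
  # '.partitions' factor column (1 = the ~70% training partition, 2 = the
  # test partition), which validate_fn() reads via 'partitions_col'.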

  multinom_model_fn <-
    function(train_data, formula, hyperparameters) {
      nnet::multinom(formula = formula, # converted to formula object within fit_model()
                     data = train_data)
    }
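  # Note: 'hyperparameters' is deliberately ignored here; the 'a' and 'b'
  # values below exist only to exercise the hyperparameter grid expansion.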

  multinom_predict_fn <-
    predict_functions("nnet_multinom")
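  # A rough sketch of the returned predict_fn, as an assumption for
  # illustration (the packaged version may differ in its internals):
  # function(test_data, model, formula, hyperparameters, train_data) {
  #   predict(model, newdata = test_data, type = "probs")
  # }
  # These argument names are the ones asserted in the predict_fn fuzz tests below.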

  multinom_preprocess_fn <- preprocess_functions("scale")
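  # preprocess_functions("scale") returns a preprocess_fn taking
  # (train_data, test_data, formula, hyperparameters), the argument names
  # asserted in the preprocess_fn fuzz test below. As an assumption about
  # its internals, it standardizes the numeric predictors using parameters
  # estimated on the training set only.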

  hparams <- list("a" = c(1, 2), "b" = c(1))
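  # Expands to two hyperparameter combinations (a = 1, b = 1; a = 2, b = 1),
  # so each validated formula yields two result rows below.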

  # Generate expectations for 'validate_fn'
  # Tip: keep the gxs_function() call below commented out,
  # so it is easy to regenerate the tests later
  xpectr::set_test_seed(42)
  # xpectr::gxs_function(
  #   fn = validate_fn,
  #   args_values = list(
  #     "train_data" = list(data_mc, participant.scores, NA, 1),
  #     "formulas" = list("target ~ predictor_1 + predictor_2 + predictor_3", "lol", NA, 1),
  #     "type" = list("multinomial", "gaussian", 1),
  #     "model_fn" = list(multinom_model_fn, identity, NA, 1),
  #     "predict_fn" = list(multinom_predict_fn, identity, NA, 1),
  #     "test_data" = list(NULL, NA),
  #     "preprocess_fn" = list(NULL, multinom_preprocess_fn, identity, NA, 1),
  #     "preprocess_once" = list(FALSE, TRUE),
  #     "hyperparameters" = list(hparams, NA, character(3)),
  #     "partitions_col" = list(".partitions"),
  #     "metrics" = list(list("all" = FALSE, "MCC" = TRUE, "Accuracy" = TRUE, "Sensitivity" = TRUE),
  #                      list(), "almost all", 1),
  #     "rm_nc" = list(FALSE),
  #     "parallel" = list(FALSE),
  #     "verbose" = list(FALSE, TRUE)
  #   ), extra_combinations = list(
  #     list("metrics" = list(),
  #          "type" = "gaussian")
  #   ),
  #   indentation = 2
  # )
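  # Each generated block below changes a single argument from the baseline
  # call and checks either the returned tibble or the captured side effects
  # (error message and error classes).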


  ## Testing 'validate_fn'                                                    ####
  ## Initially generated by xpectr
  # Testing different combinations of argument values

  # Testing validate_fn(train_data = data_mc, formulas =...
  xpectr::set_test_seed(42)
  # Assigning output
  output_19148 <- validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE)
  # Testing class
  expect_equal(
    class(output_19148),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_19148[["Fixed"]],
    c("predictor_1+predictor_2+predictor_3", "predictor_1+predictor_2+predictor_3"),
    fixed = TRUE)
  expect_equal(
    output_19148[["Accuracy"]],
    c(0.52941, 0.52941),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Sensitivity"]],
    c(0.27778, 0.27778),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["MCC"]],
    c(-0.10224, -0.10224),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Convergence Warnings"]],
    c(0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Other Warnings"]],
    c(0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Dependent"]],
    c("target", "target"),
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(output_19148),
    c("Fixed", "Accuracy", "Sensitivity", "MCC", "Predictions", "Confusion Matrix",
      "Class Level Results", "Coefficients", "Convergence Warnings",
      "Other Warnings", "Warnings and Messages", "Process", "HParams",
      "Model", "Dependent"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_19148),
    c("character", "numeric", "numeric", "numeric", "list", "list",
      "list", "list", "integer", "integer", "list", "list", "list",
      "list", "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_19148),
    c("character", "double", "double", "double", "list", "list", "list",
      "list", "integer", "integer", "list", "list", "list", "list",
      "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_19148),
    c(2L, 15L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_19148)),
    character(0),
    fixed = TRUE)

  # Testing validate_fn(train_data = participant.scores,...
  # Changed from baseline: train_data = particip...
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19370 <- xpectr::capture_side_effects(validate_fn(train_data = participant.scores, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19370[['error']]),
    xpectr::strip("1 assertions failed:\n * Could not find 'partition_col' column in 'train_data'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19370[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = NA, formulas = "tar...
  # Changed from baseline: train_data = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_12861 <- xpectr::capture_side_effects(validate_fn(train_data = NA, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_12861[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'train_data': Must be of type 'data.frame', not 'logical'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_12861[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = 1, formulas = "targ...
  # Changed from baseline: train_data = 1
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_18304 <- xpectr::capture_side_effects(validate_fn(train_data = 1, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18304[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'train_data': Must be of type 'data.frame', not 'double'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18304[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = NULL, formulas = "t...
  # Changed from baseline: train_data = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_16417 <- xpectr::capture_side_effects(validate_fn(train_data = NULL, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16417[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'train_data': Must be of type 'data.frame', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16417[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: formulas = "lol"
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_15190 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "lol", type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_15190[['error']]),
    xpectr::strip("The model formula does not contain a dependent variable."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_15190[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: formulas = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_17365 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = NA, type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_17365[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'formulas': Contains missing values (element 1)."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_17365[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: formulas = 1
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_11346 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = 1, type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_11346[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'formulas': Must be of type 'character', not 'double'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_11346[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: formulas = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_16569 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = NULL, type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16569[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'formulas': Must be of type 'character', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16569[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: type = "gaussian"
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_17050 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "gaussian", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_17050[['error']]),
    xpectr::strip("'metrics_list' contained unknown metric names: MCC, Accuracy, Sensitivity."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_17050[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: type = 1
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_14577 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = 1, model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14577[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'family/type': Must be element of set {'gaussian','binomial','multinomial'}, but\n * types do not match (numeric != character)."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14577[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: type = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_17191 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = NULL, model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_17191[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'family/type': Must be a subset of {'gaussian','binomial','multinomial'}, not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_17191[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: type, metrics
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19346 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "gaussian", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_match(
    xpectr::strip(side_effects_19346[['error']]),
    xpectr::strip("When 'type'/'family' is 'gaussian', the predictions must be a vector or matrix / data frame with one column but was a matrix with 3 columns. Did you specify 'predict_fn' correctly?\n\nFor:\nFormula: target ~ predictor_1 + predictor_2 + predictor_3\nFold column: .partitions\nFold: 2\n"),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19346[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: model_fn = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_12554 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = NA, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_12554[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'model_fn': Must be a function, not 'logical'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_12554[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: model_fn = 1
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_14622 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = 1, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14622[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'model_fn': Must be a function, not 'double'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14622[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: model_fn = identity
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19400 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = identity, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_match(
    xpectr::strip(side_effects_19400[['error']], lowercase = TRUE),
    xpectr::strip(paste0("Must be a identical to\n * ",
                         ifelse(is_checkmate_v2_1(), "set ", ""),
                         "(train_data,formula,hyperparameters)."), lowercase = TRUE), # model_fn argument names
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19400[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: model_fn = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19782 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = NULL, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19782[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'model_fn': Must be a function, not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19782[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: predict_fn = identity
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_11174 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = identity, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_match(
    xpectr::strip(side_effects_11174[['error']], lowercase = TRUE),
    xpectr::strip(paste0("Must be a identical to\n * ",
                         ifelse(is_checkmate_v2_1(), "set ", ""),
                         "(test_data,model,formula,hyperparameters,train_data)."), lowercase = TRUE), # predict_fn argument names
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_11174[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: predict_fn = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_14749 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = NA, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14749[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'predict_fn': Must be a function, not 'logical'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14749[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: predict_fn = 1
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_15603 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = 1, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_15603[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'predict_fn': Must be a function, not 'double'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_15603[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: predict_fn = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19040 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = NULL, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19040[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'predict_fn': Must be a function, not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19040[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: test_data = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_11387 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NA, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_11387[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'test_data': Must be of type 'data.frame' (or 'NULL'), not 'logical'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_11387[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: preprocess_fn = multi...
  xpectr::set_test_seed(42)
  # Assigning output
  output_19888 <- validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = multinom_preprocess_fn, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE)
  # Testing class
  expect_equal(
    class(output_19888),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_19888[["Fixed"]],
    c("predictor_1+predictor_2+predictor_3", "predictor_1+predictor_2+predictor_3"),
    fixed = TRUE)
  expect_equal(
    output_19888[["Accuracy"]],
    c(0.52941, 0.52941),
    tolerance = 1e-4)
  expect_equal(
    output_19888[["Sensitivity"]],
    c(0.27778, 0.27778),
    tolerance = 1e-4)
  expect_equal(
    output_19888[["MCC"]],
    c(-0.10224, -0.10224),
    tolerance = 1e-4)
  expect_equal(
    output_19888[["Convergence Warnings"]],
    c(0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_19888[["Other Warnings"]],
    c(0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_19888[["Dependent"]],
    c("target", "target"),
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(output_19888),
    c("Fixed", "Accuracy", "Sensitivity", "MCC", "Predictions", "Confusion Matrix",
      "Class Level Results", "Coefficients", "Preprocess", "Convergence Warnings",
      "Other Warnings", "Warnings and Messages", "Process", "HParams",
      "Model", "Dependent"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_19888),
    c("character", "numeric", "numeric", "numeric", "list", "list",
      "list", "list", "list", "integer", "integer", "list", "list",
      "list", "list", "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_19888),
    c("character", "double", "double", "double", "list", "list", "list",
      "list", "list", "integer", "integer", "list", "list", "list",
      "list", "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_19888),
    c(2L, 16L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_19888)),
    character(0),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: preprocess_fn = identity
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19466 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = identity, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_match(
    xpectr::strip(side_effects_19466[['error']], lowercase = TRUE),
    xpectr::strip(paste0("Must be a identical to\n * ",
                         ifelse(is_checkmate_v2_1(), "set ", ""),
                         "(train_data,test_data,formula,hyperparameters)."), lowercase = TRUE), # preprocess_fn argument names
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19466[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: preprocess_fn = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_10824 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NA, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_10824[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'preprocess_fn': Must be a function (or 'NULL'), not 'logical'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_10824[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: preprocess_fn = 1
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_15142 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = 1, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_15142[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'preprocess_fn': Must be a function (or 'NULL'), not 'double'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_15142[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: preprocess_once = TRUE
  xpectr::set_test_seed(42)
  # Assigning output
  output_13902 <- validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = TRUE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE)
  # Testing class
  expect_equal(
    class(output_13902),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_13902[["Fixed"]],
    c("predictor_1+predictor_2+predictor_3", "predictor_1+predictor_2+predictor_3"),
    fixed = TRUE)
  expect_equal(
    output_13902[["Accuracy"]],
    c(0.52941, 0.52941),
    tolerance = 1e-4)
  expect_equal(
    output_13902[["Sensitivity"]],
    c(0.27778, 0.27778),
    tolerance = 1e-4)
  expect_equal(
    output_13902[["MCC"]],
    c(-0.10224, -0.10224),
    tolerance = 1e-4)
  expect_equal(
    output_13902[["Convergence Warnings"]],
    c(0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_13902[["Other Warnings"]],
    c(0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_13902[["Dependent"]],
    c("target", "target"),
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(output_13902),
    c("Fixed", "Accuracy", "Sensitivity", "MCC", "Predictions", "Confusion Matrix",
      "Class Level Results", "Coefficients", "Convergence Warnings",
      "Other Warnings", "Warnings and Messages", "Process", "HParams",
      "Model", "Dependent"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_13902),
    c("character", "numeric", "numeric", "numeric", "list", "list",
      "list", "list", "integer", "integer", "list", "list", "list",
      "list", "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_13902),
    c("character", "double", "double", "double", "list", "list", "list",
      "list", "integer", "integer", "list", "list", "list", "list",
      "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_13902),
    c(2L, 15L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_13902)),
    character(0),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: preprocess_once = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19057 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = NULL, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19057[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'preprocess_once': Must be of type 'logical flag', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19057[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: hyperparameters = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_14469 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = NA, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14469[['error']]),
    xpectr::strip(paste0("Assertion failed. One of the following must apply:\n * chec",
                         "kmate::check_data_frame(hyperparameters): Must be of type 'd",
                         "ata.frame' (or 'NULL'), not 'logical'\n * checkmate::check_l",
                         "ist(hyperparameters): Must be of type 'list' (or 'NULL'), no",
                         "t 'logical'")),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14469[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: hyperparameters = cha...
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_18360 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = character(3), partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18360[['error']]),
    xpectr::strip(paste0("Assertion failed. One of the following must apply:\n * chec",
                         "kmate::check_data_frame(hyperparameters): Must be of type 'd",
                         "ata.frame' (or 'NULL'), not 'character'\n * checkmate::check",
                         "_list(hyperparameters): Must be of type 'list' (or 'NULL'), ",
                         "not 'character'")),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18360[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: hyperparameters = NULL
  xpectr::set_test_seed(42)
  # Assigning output
  output_17375 <- validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = NULL, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE)
  # Testing class
  expect_equal(
    class(output_17375),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_17375[["Fixed"]],
    "predictor_1+predictor_2+predictor_3",
    fixed = TRUE)
  expect_equal(
    output_17375[["Accuracy"]],
    0.52941,
    tolerance = 1e-4)
  expect_equal(
    output_17375[["Sensitivity"]],
    0.27778,
    tolerance = 1e-4)
  expect_equal(
    output_17375[["MCC"]],
    -0.10224,
    tolerance = 1e-4)
  expect_equal(
    output_17375[["Convergence Warnings"]],
    0,
    tolerance = 1e-4)
  expect_equal(
    output_17375[["Other Warnings"]],
    0,
    tolerance = 1e-4)
  expect_equal(
    output_17375[["Dependent"]],
    "target",
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(output_17375),
    c("Fixed", "Accuracy", "Sensitivity", "MCC", "Predictions", "Confusion Matrix",
      "Class Level Results", "Coefficients", "Convergence Warnings",
      "Other Warnings", "Warnings and Messages", "Process", "Model",
      "Dependent"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_17375),
    c("character", "numeric", "numeric", "numeric", "list", "list",
      "list", "list", "integer", "integer", "list", "list", "list",
      "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_17375),
    c("character", "double", "double", "double", "list", "list", "list",
      "list", "integer", "integer", "list", "list", "list", "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_17375),
    c(1L, 14L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_17375)),
    character(0),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: partitions_col = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_18110 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = NULL, metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18110[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'partitions_col': Must be of type 'string', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18110[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: metrics = list()
  xpectr::set_test_seed(42)
  # Assigning output
  output_13881 <- validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(), rm_nc = FALSE, parallel = FALSE, verbose = FALSE)
  # Testing class
  expect_equal(
    class(output_13881),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_13881[["Fixed"]],
    c("predictor_1+predictor_2+predictor_3", "predictor_1+predictor_2+predictor_3"),
    fixed = TRUE)
  expect_equal(
    output_13881[["Overall Accuracy"]],
    c(0.29412, 0.29412),
    tolerance = 1e-4)
  expect_equal(
    output_13881[["Balanced Accuracy"]],
    c(0.45707, 0.45707),
    tolerance = 1e-4)
  expect_equal(
    output_13881[["F1"]],
    c(NaN, NaN),
    tolerance = 1e-4)
  expect_equal(
    output_13881[["Sensitivity"]],
    c(0.27778, 0.27778),
    tolerance = 1e-4)
  expect_equal(
    output_13881[["Specificity"]],
    c(0.63636, 0.63636),
    tolerance = 1e-4)
  expect_equal(
    output_13881[["Pos Pred Value"]],
    c(NaN, NaN),
    tolerance = 1e-4)
  expect_equal(
    output_13881[["Neg Pred Value"]],
    c(0.62881, 0.62881),
    tolerance = 1e-4)
  expect_equal(
    output_13881[["Kappa"]],
    c(-0.07933, -0.07933),
    tolerance = 1e-4)
  expect_equal(
    output_13881[["MCC"]],
    c(-0.10224, -0.10224),
    tolerance = 1e-4)
  expect_equal(
    output_13881[["Detection Rate"]],
    c(0.09804, 0.09804),
    tolerance = 1e-4)
  expect_equal(
    output_13881[["Detection Prevalence"]],
    c(0.33333, 0.33333),
    tolerance = 1e-4)
  expect_equal(
    output_13881[["Prevalence"]],
    c(0.33333, 0.33333),
    tolerance = 1e-4)
  expect_equal(
    output_13881[["Convergence Warnings"]],
    c(0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_13881[["Other Warnings"]],
    c(0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_13881[["Dependent"]],
    c("target", "target"),
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(output_13881),
    c("Fixed", "Overall Accuracy", "Balanced Accuracy", "F1", "Sensitivity",
      "Specificity", "Pos Pred Value", "Neg Pred Value", "Kappa",
      "MCC", "Detection Rate", "Detection Prevalence", "Prevalence",
      "Predictions", "Confusion Matrix", "Class Level Results", "Coefficients",
      "Convergence Warnings", "Other Warnings", "Warnings and Messages",
      "Process", "HParams", "Model", "Dependent"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_13881),
    c("character", "numeric", "numeric", "numeric", "numeric", "numeric",
      "numeric", "numeric", "numeric", "numeric", "numeric", "numeric",
      "numeric", "list", "list", "list", "list", "integer", "integer",
      "list", "list", "list", "list", "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_13881),
    c("character", "double", "double", "double", "double", "double",
      "double", "double", "double", "double", "double", "double",
      "double", "list", "list", "list", "list", "integer", "integer",
      "list", "list", "list", "list", "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_13881),
    c(2L, 24L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_13881)),
    character(0),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: metrics = "almost all"
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_16851 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = "almost all", rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16851[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'metrics': Must be of type 'list', not 'character'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16851[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: metrics = 1
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_10039 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = 1, rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_10039[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'metrics': Must be of type 'list', not 'double'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_10039[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: metrics = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_18329 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = NULL, rm_nc = FALSE, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18329[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'metrics': Must be of type 'list', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18329[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: rm_nc = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_10073 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = NULL, parallel = FALSE, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_10073[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'rm_nc': Must be of type 'logical flag', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_10073[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: parallel = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_12076 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = NULL, verbose = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_12076[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'parallel': Must be of type 'logical flag', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_12076[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: verbose = TRUE
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19066 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = TRUE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19066[['warnings']]),
    xpectr::strip(character(0)),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19066[['messages']]),
    xpectr::strip("Will validate 2 models.\n"),
    fixed = TRUE)
  # Assigning output
  output_19066 <- xpectr::suppress_mw(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = TRUE))
  # Testing class
  expect_equal(
    class(output_19066),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_19066[["Fixed"]],
    c("predictor_1+predictor_2+predictor_3", "predictor_1+predictor_2+predictor_3"),
    fixed = TRUE)
  expect_equal(
    output_19066[["Accuracy"]],
    c(0.52941, 0.52941),
    tolerance = 1e-4)
  expect_equal(
    output_19066[["Sensitivity"]],
    c(0.27778, 0.27778),
    tolerance = 1e-4)
  expect_equal(
    output_19066[["MCC"]],
    c(-0.10224, -0.10224),
    tolerance = 1e-4)
  expect_equal(
    output_19066[["Convergence Warnings"]],
    c(0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_19066[["Other Warnings"]],
    c(0, 0),
    tolerance = 1e-4)
  expect_equal(
    output_19066[["Dependent"]],
    c("target", "target"),
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(output_19066),
    c("Fixed", "Accuracy", "Sensitivity", "MCC", "Predictions", "Confusion Matrix",
      "Class Level Results", "Coefficients", "Convergence Warnings",
      "Other Warnings", "Warnings and Messages", "Process", "HParams",
      "Model", "Dependent"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_19066),
    c("character", "numeric", "numeric", "numeric", "list", "list",
      "list", "list", "integer", "integer", "list", "list", "list",
      "list", "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_19066),
    c("character", "double", "double", "double", "list", "list", "list",
      "list", "integer", "integer", "list", "list", "list", "list",
      "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_19066),
    c(2L, 15L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_19066)),
    character(0),
    fixed = TRUE)

  # Testing validate_fn(train_data = data_mc, formulas =...
  # Changed from baseline: verbose = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_16117 <- xpectr::capture_side_effects(validate_fn(train_data = data_mc, formulas = "target ~ predictor_1 + predictor_2 + predictor_3", type = "multinomial", model_fn = multinom_model_fn, predict_fn = multinom_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = hparams, partitions_col = ".partitions", metrics = list(all = FALSE, MCC = TRUE, Accuracy = TRUE, Sensitivity = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = NULL), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16117[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'verbose': Must be of type 'logical flag', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16117[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  ## Finished testing 'validate_fn'                                           ####
})


test_that("testing nested tibbles in multinomial validate_fn()", {

  testthat::skip_if_not_installed("nnet")

  # Create data and partition it
  xpectr::set_test_seed(1)

  # Create and partition dataset
  data_mc <- multiclass_probability_tibble(
    num_classes = 3,
    num_observations = 50,
    apply_softmax = TRUE,
    FUN = runif,
    class_name = "predictor_"
  )
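  # 50 rows of random, softmax-normalized values in the columns predictor_1,
  # predictor_2 and predictor_3, repurposed as predictors; with the random
  # target below, roughly chance-level metrics are expected.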
  class_names <- paste0("class_", c(1, 2, 3))
  data_mc[["target"]] <- factor(sample(x = class_names,
                                       size = 50, replace = TRUE))
  data_mc <- groupdata2::partition(data_mc, p = 0.70, cat_col = "target", list_out = FALSE)
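  # With list_out = FALSE, partition() keeps the full data frame and adds a
  # ".partitions" factor column (1 = training set, 2 = test set), which
  # validate_fn() consumes via partitions_col = ".partitions".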

  multinom_model_fn <-
    function(train_data, formula, hyperparameters) {
      nnet::multinom(formula = formula, # converted to formula object within fit_model()
                     data = train_data)
    }

  multinom_predict_fn <-
    predict_functions("nnet_multinom")
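  # predict_functions("nnet_multinom") supplies a predict function with the
  # signature validate_fn() expects. Roughly a sketch (see ?predict_functions
  # for the canonical version):
  #   function(test_data, model, formula, hyperparameters, train_data) {
  #     predict(model, newdata = test_data, type = "probs")
  #   }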

  multinom_preprocess_fn <- preprocess_functions("scale")
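  # preprocess_functions("scale") returns a function with the argument names
  # asserted earlier, i.e. (train_data, test_data, formula, hyperparameters).
  # It standardizes the numeric predictors with centering/scaling parameters
  # estimated on the training partition, presumably returning them as
  # list("train" = ..., "test" = ..., "parameters" = ...), which is what
  # populates the "Preprocess" column tested above.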

  hparams <- list("a" = c(1,2), "b" = c(1))
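  # The hyperparameter grid expands to two combinations, (a = 1, b = 1) and
  # (a = 2, b = 1), so the single formula is validated twice; this is why the
  # expectations below contain two (identical) rows.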


  vld_multinomial <-
    validate_fn(
      train_data = data_mc,
      formulas = "target ~ predictor_1 + predictor_2 + predictor_3",
      type = "multinomial",
      model_fn = multinom_model_fn,
      predict_fn = multinom_predict_fn,
      test_data = NULL,
      preprocess_fn = NULL,
      preprocess_once = FALSE,
      hyperparameters = hparams,
      partitions_col = ".partitions",
      metrics = list(
        all = TRUE
      ),
      rm_nc = FALSE,
      parallel = FALSE,
      verbose = FALSE
    )
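  # metrics = list(all = TRUE) enables every multinomial metric, including the
  # weighted variants and the AIC/AICc/BIC information criteria checked below.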

  ## Testing 'vld_multinomial'                                              ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Testing class
  expect_equal(
    class(vld_multinomial),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    vld_multinomial[["Fixed"]],
    c("predictor_1+predictor_2+predictor_3", "predictor_1+predictor_2+predictor_3"),
    fixed = TRUE)
  expect_equal(
    vld_multinomial[["Overall Accuracy"]],
    c(0.29412, 0.29412),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Balanced Accuracy"]],
    c(0.45707, 0.45707),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Weighted Balanced Accuracy"]],
    c(0.45455, 0.45455),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Accuracy"]],
    c(0.52941, 0.52941),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Weighted Accuracy"]],
    c(0.51903, 0.51903),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["F1"]],
    c(NaN, NaN),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Weighted F1"]],
    c(NaN, NaN),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Sensitivity"]],
    c(0.27778, 0.27778),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Weighted Sensitivity"]],
    c(0.29412, 0.29412),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Specificity"]],
    c(0.63636, 0.63636),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Weighted Specificity"]],
    c(0.61497, 0.61497),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Pos Pred Value"]],
    c(NaN, NaN),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Weighted Pos Pred Value"]],
    c(NaN, NaN),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Neg Pred Value"]],
    c(0.62881, 0.62881),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Weighted Neg Pred Value"]],
    c(0.62428, 0.62428),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["AUC"]],
    c(0.38889, 0.38889),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Kappa"]],
    c(-0.07933, -0.07933),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Weighted Kappa"]],
    c(-0.084, -0.084),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["MCC"]],
    c(-0.10224, -0.10224),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Detection Rate"]],
    c(0.09804, 0.09804),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Weighted Detection Rate"]],
    c(0.10381, 0.10381),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Detection Prevalence"]],
    c(0.33333, 0.33333),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Weighted Detection Prevalence"]],
    c(0.35294, 0.35294),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Prevalence"]],
    c(0.33333, 0.33333),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Weighted Prevalence"]],
    c(0.33564, 0.33564),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["False Neg Rate"]],
    c(0.72222, 0.72222),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Weighted False Neg Rate"]],
    c(0.70588, 0.70588),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["False Pos Rate"]],
    c(0.36364, 0.36364),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Weighted False Pos Rate"]],
    c(0.38503, 0.38503),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["False Discovery Rate"]],
    c(NaN, NaN),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Weighted False Discovery Rate"]],
    c(NaN, NaN),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["False Omission Rate"]],
    c(0.37119, 0.37119),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Weighted False Omission Rate"]],
    c(0.37572, 0.37572),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Threat Score"]],
    c(0.13889, 0.13889),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Weighted Threat Score"]],
    c(0.14706, 0.14706),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["AIC"]],
    c(80.67823, 80.67823),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["AICc"]],
    c(83.909, 83.909),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["BIC"]],
    c(89.65728, 89.65728),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Convergence Warnings"]],
    c(0, 0),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Other Warnings"]],
    c(0, 0),
    tolerance = 1e-4)
  expect_equal(
    vld_multinomial[["Dependent"]],
    c("target", "target"),
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(vld_multinomial),
    c("Fixed", "Overall Accuracy", "Balanced Accuracy", "Weighted Balanced Accuracy",
      "Accuracy", "Weighted Accuracy", "F1", "Weighted F1", "Sensitivity",
      "Weighted Sensitivity", "Specificity", "Weighted Specificity",
      "Pos Pred Value", "Weighted Pos Pred Value", "Neg Pred Value",
      "Weighted Neg Pred Value", "AUC", "Kappa", "Weighted Kappa",
      "MCC", "Detection Rate", "Weighted Detection Rate", "Detection Prevalence",
      "Weighted Detection Prevalence", "Prevalence", "Weighted Prevalence",
      "False Neg Rate", "Weighted False Neg Rate", "False Pos Rate",
      "Weighted False Pos Rate", "False Discovery Rate", "Weighted False Discovery Rate",
      "False Omission Rate", "Weighted False Omission Rate", "Threat Score",
      "Weighted Threat Score", "AIC", "AICc", "BIC", "Predictions",
      "Confusion Matrix", "Class Level Results", "Coefficients", "Convergence Warnings",
      "Other Warnings", "Warnings and Messages", "Process", "HParams",
      "Model", "Dependent"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::smpl(xpectr::element_classes(vld_multinomial), n = 30),
    c("numeric", "numeric", "numeric", "numeric", "numeric", "numeric",
      "numeric", "numeric", "numeric", "numeric", "numeric", "numeric",
      "numeric", "numeric", "numeric", "numeric", "numeric", "numeric",
      "numeric", "numeric", "numeric", "numeric", "list", "list",
      "list", "integer", "integer", "list", "list", "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::smpl(xpectr::element_types(vld_multinomial), n = 30),
    c("double", "double", "double", "double", "double", "double", "double",
      "double", "double", "double", "double", "double", "double",
      "double", "double", "double", "double", "double", "double",
      "double", "double", "double", "list", "list", "list", "integer",
      "integer", "list", "list", "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(vld_multinomial),
    c(2L, 50L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(vld_multinomial)),
    character(0),
    fixed = TRUE)
  ## Finished testing 'vld_multinomial'                                     ####


  ## Testing 'dplyr::bind_rows(vld_multinomial$HParams)'                    ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Assigning output
  output_19148 <- dplyr::bind_rows(vld_multinomial$HParams)
  # Testing class
  expect_equal(
    class(output_19148),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_19148[["a"]],
    c(1, 2),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["b"]],
    c(1, 1),
    tolerance = 1e-4)
  # Testing column names
  expect_equal(
    names(output_19148),
    c("a", "b"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_19148),
    c("numeric", "numeric"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_19148),
    c("double", "double"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_19148),
    c(2L, 2L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_19148)),
    character(0),
    fixed = TRUE)
  ## Finished testing 'dplyr::bind_rows(vld_multinomial$HParams)'           ####


  ## Testing 'dplyr::bind_rows(vld_multinomial$`Warnings a...'              ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Assigning output
  output_19148 <- dplyr::bind_rows(vld_multinomial$`Warnings and Messages`)
  # Testing class
  expect_equal(
    class(output_19148),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_19148[["Function"]],
    character(0),
    fixed = TRUE)
  expect_equal(
    output_19148[["Type"]],
    character(0),
    fixed = TRUE)
  expect_equal(
    output_19148[["Message"]],
    character(0),
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(output_19148),
    c("Function", "Type", "Message"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_19148),
    c("character", "character", "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_19148),
    c("character", "character", "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_19148),
    c(0L, 3L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_19148)),
    character(0),
    fixed = TRUE)
  ## Finished testing 'dplyr::bind_rows(vld_multinomial$`Warnings a...'     ####

  ## Testing 'dplyr::bind_rows(vld_multinomial$Coefficients)'               ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Assigning output
  output_19148 <- dplyr::bind_rows(vld_multinomial$Coefficients)
  # Testing class
  expect_equal(
    class(output_19148),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_19148[["term"]],
    c(NA, NA))
  expect_equal(
    output_19148[["estimate"]],
    c(NA, NA))
  expect_equal(
    output_19148[["std.error"]],
    c(NA, NA))
  expect_equal(
    output_19148[["statistic"]],
    c(NA, NA))
  expect_equal(
    output_19148[["p.value"]],
    c(NA, NA))
  # Testing column names
  expect_equal(
    names(output_19148),
    c("term", "estimate", "std.error", "statistic", "p.value"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_19148),
    c("logical", "logical", "logical", "logical", "logical"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_19148),
    c("logical", "logical", "logical", "logical", "logical"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_19148),
    c(2L, 5L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_19148)),
    character(0),
    fixed = TRUE)
  ## Finished testing 'dplyr::bind_rows(vld_multinomial$Coefficients)'      ####


  ## Testing 'dplyr::bind_rows(vld_multinomial$`Class Leve...'              ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Assigning output
  output_19148 <- dplyr::bind_rows(vld_multinomial$`Class Level Results`)
  # Testing class
  expect_equal(
    class(output_19148),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_19148[["Class"]],
    c("class_1", "class_2", "class_3", "class_1", "class_2", "class_3"),
    fixed = TRUE)
  expect_equal(
    output_19148[["Balanced Accuracy"]],
    c(0.5, 0.39394, 0.47727, 0.5, 0.39394, 0.47727),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Accuracy"]],
    c(0.70588, 0.41176, 0.47059, 0.70588, 0.41176, 0.47059),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["F1"]],
    c(NaN, 0.28571, 0.4, NaN, 0.28571, 0.4),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Sensitivity"]],
    c(0, 0.33333, 0.5, 0, 0.33333, 0.5),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Specificity"]],
    c(1, 0.45455, 0.45455, 1, 0.45455, 0.45455),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Pos Pred Value"]],
    c(NaN, 0.25, 0.33333, NaN, 0.25, 0.33333),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Neg Pred Value"]],
    c(0.70588, 0.55556, 0.625, 0.70588, 0.55556, 0.625),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Kappa"]],
    c(0, -0.19718, -0.04082, 0, -0.19718, -0.04082),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Detection Rate"]],
    c(0, 0.11765, 0.17647, 0, 0.11765, 0.17647),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Detection Prevalence"]],
    c(0, 0.47059, 0.52941, 0, 0.47059, 0.52941),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Prevalence"]],
    c(0.29412, 0.35294, 0.35294, 0.29412, 0.35294, 0.35294),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["False Neg Rate"]],
    c(1, 0.66667, 0.5, 1, 0.66667, 0.5),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["False Pos Rate"]],
    c(0, 0.54545, 0.54545, 0, 0.54545, 0.54545),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["False Discovery Rate"]],
    c(NaN, 0.75, 0.66667, NaN, 0.75, 0.66667),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["False Omission Rate"]],
    c(0.29412, 0.44444, 0.375, 0.29412, 0.44444, 0.375),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Threat Score"]],
    c(0, 0.16667, 0.25, 0, 0.16667, 0.25),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Support"]],
    c(5, 6, 6, 5, 6, 6),
    tolerance = 1e-4)
  # Testing column names
  expect_equal(
    names(output_19148),
    c("Class", "Balanced Accuracy", "Accuracy", "F1", "Sensitivity",
      "Specificity", "Pos Pred Value", "Neg Pred Value", "Kappa",
      "Detection Rate", "Detection Prevalence", "Prevalence", "False Neg Rate",
      "False Pos Rate", "False Discovery Rate", "False Omission Rate",
      "Threat Score", "Support", "Results", "Confusion Matrix"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_19148),
    c("character", "numeric", "numeric", "numeric", "numeric", "numeric",
      "numeric", "numeric", "numeric", "numeric", "numeric", "numeric",
      "numeric", "numeric", "numeric", "numeric", "numeric", "integer",
      "list", "list"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_19148),
    c("character", "double", "double", "double", "double", "double",
      "double", "double", "double", "double", "double", "double",
      "double", "double", "double", "double", "double", "integer",
      "list", "list"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_19148),
    c(6L, 20L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_19148)),
    character(0),
    fixed = TRUE)
  ## Finished testing 'dplyr::bind_rows(vld_multinomial$`Class Leve...'     ####


  ## Testing 'dplyr::bind_rows(vld_multinomial$`Confusion ...'              ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Assigning output
  output_19148 <- dplyr::bind_rows(vld_multinomial$`Confusion Matrix`)
  # Testing class
  expect_equal(
    class(output_19148),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_19148[["Prediction"]],
    c("class_1", "class_2", "class_3", "class_1", "class_2", "class_3",
      "class_1", "class_2", "class_3", "class_1", "class_2", "class_3",
      "class_1", "class_2", "class_3", "class_1", "class_2", "class_3"),
    fixed = TRUE)
  expect_equal(
    output_19148[["Target"]],
    c("class_1", "class_1", "class_1", "class_2", "class_2", "class_2",
      "class_3", "class_3", "class_3", "class_1", "class_1", "class_1",
      "class_2", "class_2", "class_2", "class_3", "class_3", "class_3"),
    fixed = TRUE)
  expect_equal(
    output_19148[["N"]],
    c(0, 3, 2, 0, 2, 4, 0, 3, 3, 0, 3, 2, 0, 2, 4, 0, 3, 3),
    tolerance = 1e-4)
  # Testing column names
  expect_equal(
    names(output_19148),
    c("Prediction", "Target", "N"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_19148),
    c("character", "character", "integer"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_19148),
    c("character", "character", "integer"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_19148),
    c(18L, 3L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_19148)),
    character(0),
    fixed = TRUE)
  ## Finished testing 'dplyr::bind_rows(vld_multinomial$`Confusion ...'     ####


  ## Testing 'dplyr::bind_rows(vld_multinomial$Predictions)'                ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Assigning output
  output_19148 <- dplyr::bind_rows(vld_multinomial$Predictions)
  # Testing class
  expect_equal(
    class(output_19148),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    xpectr::smpl(output_19148[["Observation"]], n = 30),
    c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15, 16, 17, 1, 2, 3,
      4, 6, 7, 8, 9, 10, 11, 14, 15, 16, 17),
    tolerance = 1e-4)
  expect_equal(
    xpectr::smpl(output_19148[["Target"]], n = 30),
    c("class_1", "class_1", "class_1", "class_1", "class_1", "class_2",
      "class_2", "class_2", "class_2", "class_2", "class_2", "class_3",
      "class_3", "class_3", "class_3", "class_3", "class_1", "class_1",
      "class_1", "class_1", "class_2", "class_2", "class_2", "class_2",
      "class_2", "class_2", "class_3", "class_3", "class_3", "class_3"),
    fixed = TRUE)
  expect_equal(
    xpectr::smpl(output_19148[["Predicted Class"]], n = 30),
    c("class_3", "class_2", "class_2", "class_3", "class_2", "class_3",
      "class_3", "class_2", "class_2", "class_3", "class_3", "class_2",
      "class_3", "class_2", "class_2", "class_3", "class_3", "class_2",
      "class_2", "class_3", "class_3", "class_3", "class_2", "class_2",
      "class_3", "class_3", "class_3", "class_2", "class_2", "class_3"),
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(output_19148),
    c("Observation", "Target", "Prediction", "Predicted Class"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_19148),
    c("integer", "character",
      ifelse(is_dplyr_1(), "vctrs_list_of", "list"),
      "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_19148),
    c("integer", "character", "list", "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_19148),
    c(34L, 4L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_19148)),
    character(0),
    fixed = TRUE)
  ## Finished testing 'dplyr::bind_rows(vld_multinomial$Predictions)'       ####


  ## Testing 'vld_multinomial$Process[[1]]'                                 ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Unsupported class: using fallback tests
  # Testing class
  expect_equal(
    class(vld_multinomial$Process[[1]]),
    "process_info_multinomial",
    fixed = TRUE)
  # Testing type
  expect_type(
    vld_multinomial$Process[[1]],
    type = "list")
  # Testing names
  expect_equal(
    names(vld_multinomial$Process[[1]]),
    c("Target Column", "Prediction Columns", "ID Column", "Family",
      "Classes", "Softmax Applied", "Target Summary", "Prediction Summary",
      "Locale"),
    fixed = TRUE)
  # Testing dput() content
  # NOTE: Manually tested
  expect_equal(
    vld_multinomial$Process[[1]]$`Target Column`,
    "target"
  )
  expect_equal(
    vld_multinomial$Process[[1]]$`Prediction Columns`,
    c("class_1", "class_2", "class_3")
  )
  expect_equal(
    vld_multinomial$Process[[1]]$`ID Column`,
    NULL
  )
  expect_equal(
    vld_multinomial$Process[[1]]$`Family`,
    "Multinomial"
  )
  expect_equal(
    vld_multinomial$Process[[1]]$Classes,
    c("class_1", "class_2", "class_3")
  )
  expect_equal(
    vld_multinomial$Process[[1]]$`Softmax Applied`,
    NULL
  )
  expect_equal(
    vld_multinomial$Process[[1]]$`Target Summary`,
    list(Total = 17L,
         `Class Counts` = c(class_1 = 5, class_2 = 6, class_3 = 6))
  )
  expect_equal(
    vld_multinomial$Process[[1]]$`Prediction Summary`,
    list(Total = 17L,
         `Class Counts` = c(class_1 = 0, class_2 = 8, class_3 = 9))
  )
  expect_true(
    vld_multinomial$Process[[1]]$`Locale` %in%
    c("en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8",
      "C/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8",
      Sys.getlocale())
  )

  ## Finished testing 'vld_multinomial$Process[[1]]'                        ####



})

test_that("testing nested tibbles in gaussian validate_fn()", {

  # Load data and partition it
  xpectr::set_test_seed(3)
  dat_ready <- participant.scores %>%
    dplyr::mutate(diagnosis = as.factor(diagnosis)) %>%
    groupdata2::partition(p = 0.75,
                          cat_col = "diagnosis",
                          id_col = "participant",
                          list_out = FALSE)

  lm_model_fn <- function(train_data, formula, hyperparameters){
    lm(formula = formula, data = train_data)
  }

  lm_predict_fn <- predict_functions("lm")
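  # predict_functions("lm") supplies the gaussian predict function; a rough
  # sketch (the exact version lives in ?predict_functions):
  #   function(test_data, model, formula, hyperparameters, train_data) {
  #     predict(model, newdata = test_data)
  #   }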

  # Generate expectations for 'validate_fn'
  # Tip: the expectations below were generated with xpectr (gxs_function());
  # rerunning it on the call below makes the tests easy to regenerate
  xpectr::set_test_seed(42)

  vld_gauss <- validate_fn(train_data = dat_ready, formulas = "score ~ diagnosis", type = "gaussian", model_fn = lm_model_fn, predict_fn = lm_predict_fn, test_data = NULL, preprocess_fn = NULL, preprocess_once = FALSE, hyperparameters = list(a = c(1, 2), b = c(2)), partitions_col = ".partitions", metrics = list(all = FALSE, RMSE = TRUE), rm_nc = FALSE, parallel = FALSE, verbose = FALSE)
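  # lm_model_fn ignores the hyperparameters, so the two combinations from
  # list(a = c(1, 2), b = c(2)) yield identical metrics; this test targets the
  # structure of the nested tibbles rather than the values.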


  ## Testing 'vld_gauss'                                                    ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Testing class
  expect_equal(
    class(vld_gauss),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    vld_gauss[["Fixed"]],
    c("diagnosis", "diagnosis"),
    fixed = TRUE)
  expect_equal(
    vld_gauss[["RMSE"]],
    c(12.39997, 12.39997),
    tolerance = 1e-4)
  expect_equal(
    vld_gauss[["Convergence Warnings"]],
    c(0, 0),
    tolerance = 1e-4)
  expect_equal(
    vld_gauss[["Other Warnings"]],
    c(0, 0),
    tolerance = 1e-4)
  expect_equal(
    vld_gauss[["Dependent"]],
    c("score", "score"),
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(vld_gauss),
    c("Fixed", "RMSE", "Predictions", "Coefficients", "Convergence Warnings",
      "Other Warnings", "Warnings and Messages", "Process", "HParams",
      "Model", "Dependent"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(vld_gauss),
    c("character", "numeric", "list", "list", "integer", "integer",
      "list", "list", "list", "list", "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(vld_gauss),
    c("character", "double", "list", "list", "integer", "integer", "list",
      "list", "list", "list", "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(vld_gauss),
    c(2L, 11L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(vld_gauss)),
    character(0),
    fixed = TRUE)
  ## Finished testing 'vld_gauss'                                           ####


  ## Testing 'dplyr::bind_rows(vld_gauss$HParams)'                          ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Assigning output
  output_19148 <- dplyr::bind_rows(vld_gauss$HParams)
  # Testing class
  expect_equal(
    class(output_19148),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_19148[["a"]],
    c(1, 2),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["b"]],
    c(2, 2),
    tolerance = 1e-4)
  # Testing column names
  expect_equal(
    names(output_19148),
    c("a", "b"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_19148),
    c("numeric", "numeric"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_19148),
    c("double", "double"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_19148),
    c(2L, 2L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_19148)),
    character(0),
    fixed = TRUE)
  ## Finished testing 'dplyr::bind_rows(vld_gauss$HParams)'                 ####


  ## Testing 'dplyr::bind_rows(vld_gauss$`Warnings and Mes...'              ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Assigning output
  output_19148 <- dplyr::bind_rows(vld_gauss$`Warnings and Messages`)
  # Testing class
  expect_equal(
    class(output_19148),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_19148[["Function"]],
    character(0),
    fixed = TRUE)
  expect_equal(
    output_19148[["Type"]],
    character(0),
    fixed = TRUE)
  expect_equal(
    output_19148[["Message"]],
    character(0),
    fixed = TRUE)
  # Testing column names
  expect_equal(
    names(output_19148),
    c("Function", "Type", "Message"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_19148),
    c("character", "character", "character"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_19148),
    c("character", "character", "character"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_19148),
    c(0L, 3L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_19148)),
    character(0),
    fixed = TRUE)
  ## Finished testing 'dplyr::bind_rows(vld_gauss$`Warnings and Mes...'     ####


  ## Testing 'dplyr::bind_rows(vld_gauss$Coefficients)'                     ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Assigning output
  output_19148 <- dplyr::bind_rows(vld_gauss$Coefficients)
  # Testing class
  expect_equal(
    class(output_19148),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_19148[["term"]],
    c("(Intercept)", "diagnosis1", "(Intercept)", "diagnosis1"),
    fixed = TRUE)
  expect_equal(
    output_19148[["estimate"]],
    c(51, -17.83333, 51, -17.83333),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["std.error"]],
    c(6.19926, 8.20085, 6.19926, 8.20085),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["statistic"]],
    c(8.22679, -2.17457, 8.22679, -2.17457),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["p.value"]],
    c(0, 0.0425, 0, 0.0425),
    tolerance = 1e-4)
  # Testing column names
  expect_equal(
    names(output_19148),
    c("term", "estimate", "std.error", "conf.level",
      "conf.low", "conf.high", "statistic", "df.error", "p.value"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_19148),
    c("character", "numeric", "numeric", "numeric",
      "numeric", "numeric", "numeric", "integer", "numeric"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_19148),
    c("character", "double", "double", "double",
      "double",  "double", "double", "integer", "double"),
    fixed = TRUE)
  # Testing dimensions (disabled; kept for reference)
  # expect_equal(
  #   dim(output_19148),
  #   c(4L, 9L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_19148)),
    character(0),
    fixed = TRUE)
  ## Finished testing 'dplyr::bind_rows(vld_gauss$Coefficients)'            ####


  ## Testing 'dplyr::bind_rows(vld_gauss$Predictions)'                      ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Assigning output
  output_19148 <- dplyr::bind_rows(vld_gauss$Predictions)
  # Testing class
  expect_equal(
    class(output_19148),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_19148[["Observation"]],
    c(1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Target"]],
    c(33, 53, 66, 15, 30, 40, 14, 25, 30, 33, 53, 66, 15, 30, 40, 14,
      25, 30),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Prediction"]],
    c(51, 51, 51, 33.16667, 33.16667, 33.16667, 33.16667, 33.16667,
      33.16667, 51, 51, 51, 33.16667, 33.16667, 33.16667, 33.16667,
      33.16667, 33.16667),
    tolerance = 1e-4)
  # Testing column names
  expect_equal(
    names(output_19148),
    c("Observation", "Target", "Prediction"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_19148),
    c("integer", "numeric", "numeric"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_19148),
    c("integer", "double", "double"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_19148),
    c(18L, 3L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_19148)),
    character(0),
    fixed = TRUE)
  ## Finished testing 'dplyr::bind_rows(vld_gauss$Predictions)'             ####


  ## Testing 'vld_gauss$Process[[1]]'                                       ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Unsupported class: using fallback tests
  # Testing class
  expect_equal(
    class(vld_gauss$Process[[1]]),
    "process_info_gaussian",
    fixed = TRUE)
  # Testing type
  expect_type(
    vld_gauss$Process[[1]],
    type = "list")
  # Testing names
  expect_equal(
    names(vld_gauss$Process[[1]]),
    c("Target Column", "Prediction Column", "ID Column", "Family", "Target Summary",
      "Prediction Summary", "Locale"),
    fixed = TRUE)
  # Testing dput() content
  # NOTE: Manually tested
  expect_equal(
    vld_gauss$Process[[1]]$`Target Column`,
    "target"
  )
  expect_equal(
    vld_gauss$Process[[1]]$`Prediction Column`,
    "prediction"
  )
  expect_equal(
    vld_gauss$Process[[1]]$`ID Column`,
    NULL
  )
  expect_equal(
    vld_gauss$Process[[1]]$`Family`,
    "Gaussian"
  )
  expect_equal(
    vld_gauss$Process[[1]]$`Target Summary`,
    list(Mean = 34, Median = 30, Range = c(14, 66), SD = 16.9410743460974, IQR = 15)
  )
  expect_equal(
    vld_gauss$Process[[1]]$`Prediction Summary`,
    list(Mean = 39.1111111111111, Median = 33.1666666666667,
         Range = c(33.1666666666667, 51), SD = 8.91666666666666,
         IQR = 17.8333333333333)
  )
  expect_true(
    vld_gauss$Process[[1]]$`Locale` %in%
      c("en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8",
        "C/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8",
        Sys.getlocale())
  )

  ## Finished testing 'vld_gauss$Process[[1]]'                              ####

})