# tests/testthat/test_confusion_matrix.R

library(cvms)
context("confusion_matrix")

# Tests of confusion_matrix() for binomial and multinomial predictions

test_that("Binomial confusion_matrix() works", {

  xpectr::set_test_seed(41)
  # Simulate binary targets and predictions where roughly 30% of the
  # predictions are drawn at random and the rest copy the targets
  targets <- sample(0:1, 50, replace = TRUE)
  possible_preds <- sample(0:1, 50, replace = TRUE)
  preds <- ifelse(runif(50) > 0.7, possible_preds, targets)

  cm <- confusion_matrix(targets, preds)

  ## Testing 'cm'                                                           ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Testing class
  expect_equal(
    class(cm),
    c("cfm_results", "cfm_binomial", "tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    cm[["Positive Class"]],
    "1",
    fixed = TRUE)
  expect_equal(
    cm[["Balanced Accuracy"]],
    0.83512,
    tolerance = 1e-4)
  expect_equal(
    cm[["F1"]],
    0.85246,
    tolerance = 1e-4)
  expect_equal(
    cm[["Sensitivity"]],
    0.78788,
    tolerance = 1e-4)
  expect_equal(
    cm[["Specificity"]],
    0.88235,
    tolerance = 1e-4)
  expect_equal(
    cm[["Pos Pred Value"]],
    0.92857,
    tolerance = 1e-4)
  expect_equal(
    cm[["Neg Pred Value"]],
    0.68182,
    tolerance = 1e-4)
  expect_equal(
    cm[["Kappa"]],
    0.62562,
    tolerance = 1e-4)
  expect_equal(
    cm[["MCC"]],
    0.63961,
    tolerance = 1e-4)
  expect_equal(
    cm[["Detection Rate"]],
    0.52,
    tolerance = 1e-4)
  expect_equal(
    cm[["Detection Prevalence"]],
    0.56,
    tolerance = 1e-4)
  expect_equal(
    cm[["Prevalence"]],
    0.66,
    tolerance = 1e-4)
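  # Consistency check (added sketch): for two classes, balanced accuracy
  # is the mean of sensitivity and specificity, so the values above
  # should satisfy this identity
  expect_equal(
    cm[["Balanced Accuracy"]],
    (cm[["Sensitivity"]] + cm[["Specificity"]]) / 2,
    tolerance = 1e-6)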
  # Testing column names
  expect_equal(
    names(cm),
    c("Confusion Matrix", "Table", "Positive Class", "Balanced Accuracy",
      "F1", "Sensitivity", "Specificity", "Pos Pred Value", "Neg Pred Value",
      "Kappa", "MCC", "Detection Rate", "Detection Prevalence", "Prevalence"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(cm),
    c("list", "list", "character", "numeric", "numeric", "numeric", "numeric",
      "numeric", "numeric", "numeric", "numeric", "numeric", "numeric",
      "numeric"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(cm),
    c("list", "list", "character", "double", "double", "double", "double",
      "double", "double", "double", "double", "double", "double",
      "double"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(cm),
    c(1L, 14L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(cm)),
    character(0),
    fixed = TRUE)
  ## Finished testing 'cm'                                                  ####


  ## Testing 'cm[["Confusion Matrix"]][[1]]'                                ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Testing class
  expect_equal(
    class(cm[["Confusion Matrix"]][[1]]),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    cm[["Confusion Matrix"]][[1]][["Prediction"]],
    c("0", "1", "0", "1"),
    fixed = TRUE)
  expect_equal(
    cm[["Confusion Matrix"]][[1]][["Target"]],
    c("0", "0", "1", "1"),
    fixed = TRUE)
  expect_equal(
    cm[["Confusion Matrix"]][[1]][["Pos_0"]],
    c("TP", "FN", "FP", "TN"),
    fixed = TRUE)
  expect_equal(
    cm[["Confusion Matrix"]][[1]][["Pos_1"]],
    c("TN", "FP", "FN", "TP"),
    fixed = TRUE)
  expect_equal(
    cm[["Confusion Matrix"]][[1]][["N"]],
    c(15, 2, 7, 26),
    tolerance = 1e-4)
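  # Cross-check (added sketch): with positive class "1", the Pos_1 column
  # above maps N = c(15, 2, 7, 26) to c(TN, FP, FN, TP), so the headline
  # metrics can be rederived from the raw counts
  n_counts <- cm[["Confusion Matrix"]][[1]][["N"]]
  expect_equal(
    n_counts[[4]] / (n_counts[[4]] + n_counts[[3]]), # TP / (TP + FN)
    0.78788, # Sensitivity
    tolerance = 1e-4)
  expect_equal(
    n_counts[[1]] / (n_counts[[1]] + n_counts[[2]]), # TN / (TN + FP)
    0.88235, # Specificity
    tolerance = 1e-4)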
  # Testing column names
  expect_equal(
    names(cm[["Confusion Matrix"]][[1]]),
    c("Prediction", "Target", "Pos_0", "Pos_1", "N"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(cm[["Confusion Matrix"]][[1]]),
    c("character", "character", "character", "character", "integer"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(cm[["Confusion Matrix"]][[1]]),
    c("character", "character", "character", "character", "integer"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(cm[["Confusion Matrix"]][[1]]),
    4:5)
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(cm[["Confusion Matrix"]][[1]])),
    character(0),
    fixed = TRUE)
  ## Finished testing 'cm[["Confusion Matrix"]][[1]]'                       ####

  # TODO test the table in more depth; basic sanity checks below

})

test_that("Multinomial confusion_matrix() works", {

  xpectr::set_test_seed(41)
  targets <- sample(1:4, 50, replace = TRUE)
  possible_preds <- sample(1:4, 50, replace = TRUE)
  preds <- ifelse(runif(50) > 0.7, possible_preds, targets)

  cm <- confusion_matrix(targets, preds)


  ## Testing 'cm'                                                           ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Testing class
  expect_equal(
    class(cm),
    c("cfm_results", "cfm_multinomial", "tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    cm[["Overall Accuracy"]],
    0.72,
    tolerance = 1e-4)
  expect_equal(
    cm[["Balanced Accuracy"]],
    0.81353,
    tolerance = 1e-4)
  expect_equal(
    cm[["F1"]],
    0.70055,
    tolerance = 1e-4)
  expect_equal(
    cm[["Sensitivity"]],
    0.71736,
    tolerance = 1e-4)
  expect_equal(
    cm[["Specificity"]],
    0.90969,
    tolerance = 1e-4)
  expect_equal(
    cm[["Pos Pred Value"]],
    0.70208,
    tolerance = 1e-4)
  expect_equal(
    cm[["Neg Pred Value"]],
    0.90569,
    tolerance = 1e-4)
  expect_equal(
    cm[["Kappa"]],
    0.60867,
    tolerance = 1e-4)
  expect_equal(
    cm[["MCC"]],
    0.62725,
    tolerance = 1e-4)
  expect_equal(
    cm[["Detection Rate"]],
    0.18,
    tolerance = 1e-4)
  expect_equal(
    cm[["Detection Prevalence"]],
    0.25,
    tolerance = 1e-4)
  expect_equal(
    cm[["Prevalence"]],
    0.25,
    tolerance = 1e-4)
  # Testing column names
  expect_equal(
    names(cm),
    c("Confusion Matrix", "Table", "Class Level Results", "Overall Accuracy",
      "Balanced Accuracy", "F1", "Sensitivity", "Specificity", "Pos Pred Value",
      "Neg Pred Value", "Kappa", "MCC", "Detection Rate", "Detection Prevalence",
      "Prevalence"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(cm),
    c("list", "list", "list", "numeric", "numeric", "numeric", "numeric",
      "numeric", "numeric", "numeric", "numeric", "numeric", "numeric",
      "numeric", "numeric"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(cm),
    c("list", "list", "list", "double", "double", "double", "double",
      "double", "double", "double", "double", "double", "double",
      "double", "double"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(cm),
    c(1L, 15L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(cm)),
    character(0),
    fixed = TRUE)
  ## Finished testing 'cm'                                                  ####


  ## Testing 'cm[["Confusion Matrix"]][[1]]'                                ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Testing class
  expect_equal(
    class(cm[["Confusion Matrix"]][[1]]),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    cm[["Confusion Matrix"]][[1]][["Prediction"]],
    c("1", "2", "3", "4", "1", "2", "3", "4", "1", "2", "3", "4", "1",
      "2", "3", "4"),
    fixed = TRUE)
  expect_equal(
    cm[["Confusion Matrix"]][[1]][["Target"]],
    c("1", "1", "1", "1", "2", "2", "2", "2", "3", "3", "3", "3", "4",
      "4", "4", "4"),
    fixed = TRUE)
  expect_equal(
    cm[["Confusion Matrix"]][[1]][["N"]],
    c(7, 1, 1, 0, 2, 5, 0, 1, 0, 2, 12, 1, 4, 1, 1, 12),
    tolerance = 1e-4)
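  # Cross-check (added sketch): the counts should sum to the 50
  # observations, and the diagonal cells (Prediction == Target) should
  # reproduce the Overall Accuracy of 0.72 tested above
  long_cm <- cm[["Confusion Matrix"]][[1]]
  expect_equal(sum(long_cm[["N"]]), 50)
  expect_equal(
    sum(long_cm[["N"]][long_cm[["Prediction"]] == long_cm[["Target"]]]) / 50,
    0.72,
    tolerance = 1e-4)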
  # Testing column names
  expect_equal(
    names(cm[["Confusion Matrix"]][[1]]),
    c("Prediction", "Target", "N"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(cm[["Confusion Matrix"]][[1]]),
    c("character", "character", "integer"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(cm[["Confusion Matrix"]][[1]]),
    c("character", "character", "integer"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(cm[["Confusion Matrix"]][[1]]),
    c(16L, 3L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(cm[["Confusion Matrix"]][[1]])),
    character(0),
    fixed = TRUE)
  ## Finished testing 'cm[["Confusion Matrix"]][[1]]'                       ####


  ## Testing 'cm[["Class Level Results"]][[1]]'                             ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Assigning output
  output_19148 <- cm[["Class Level Results"]][[1]]
  # Testing class
  expect_equal(
    class(output_19148),
    c("tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_19148[["Class"]],
    c("1", "2", "3", "4"),
    fixed = TRUE)
  expect_equal(
    output_19148[["Support"]],
    c(9, 8, 15, 18),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Balanced Accuracy"]],
    c(0.81572, 0.76488, 0.87143, 0.80208),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Sensitivity"]],
    c(0.77778, 0.625, 0.8, 0.66667),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Specificity"]],
    c(0.85366, 0.90476, 0.94286, 0.9375),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Pos Pred Value"]],
    c(0.53846, 0.55556, 0.85714, 0.85714),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Neg Pred Value"]],
    c(0.94595, 0.92683, 0.91667, 0.83333),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["F1"]],
    c(0.63636, 0.58824, 0.82759, 0.75),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Kappa"]],
    c(0.53811, 0.50425, 0.75728, 0.63504),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Prevalence"]],
    c(0.18, 0.16, 0.3, 0.36),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Detection Rate"]],
    c(0.14, 0.1, 0.24, 0.24),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Detection Prevalence"]],
    c(0.26, 0.18, 0.28, 0.28),
    tolerance = 1e-4)
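  # Consistency check (added sketch): the summary row in 'cm' appears to
  # macro-average the class level results (the numbers here are consistent
  # with that), so the per-class values above should average to the
  # summary metrics tested earlier
  expect_equal(mean(output_19148[["Sensitivity"]]), 0.71736, tolerance = 1e-4)
  expect_equal(mean(output_19148[["Balanced Accuracy"]]), 0.81353, tolerance = 1e-4)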
  # Testing column names
  expect_equal(
    names(output_19148),
    c("Class", "Support", "Confusion Matrix", "Table", "Balanced Accuracy",
      "Sensitivity", "Specificity", "Pos Pred Value", "Neg Pred Value",
      "F1", "Kappa", "Prevalence", "Detection Rate", "Detection Prevalence"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_19148),
    c("character", "integer", "list", "list", "numeric", "numeric",
      "numeric", "numeric", "numeric", "numeric", "numeric", "numeric",
      "numeric", "numeric"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_19148),
    c("character", "integer", "list", "list", "double", "double", "double",
      "double", "double", "double", "double", "double", "double",
      "double"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_19148),
    c(4L, 14L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_19148)),
    character(0),
    fixed = TRUE)
  ## Finished testing 'cm[["Class Level Results"]][[1]]'                    ####

  # TODO test the table in more depth; basic sanity checks below


})

test_that("different levels in predictions and targets works in confusion_matrix()", {

  xpectr::set_test_seed(42)
  targets <- sample(0:1, 50, replace = TRUE)
  possible_preds <- sample(0:2, 50, replace = TRUE)
  preds <- ifelse(runif(50) > 0.7, possible_preds, targets)

  cm <- confusion_matrix(targets, preds)
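  # Class "2" can occur in the predictions but never in the targets, so
  # its one-vs-all sensitivity has zero support (0/0) and the
  # macro-averaged Balanced Accuracy, F1, and Sensitivity become NaN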

  ## Testing 'cm'                                                           ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Testing class
  expect_equal(
    class(cm),
    c("cfm_results", "cfm_multinomial", "tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    cm[["Overall Accuracy"]],
    0.76,
    tolerance = 1e-4)
  expect_equal(
    cm[["Balanced Accuracy"]],
    NaN,
    tolerance = 1e-4)
  expect_equal(
    cm[["F1"]],
    NaN,
    tolerance = 1e-4)
  expect_equal(
    cm[["Sensitivity"]],
    NaN,
    tolerance = 1e-4)
  expect_equal(
    cm[["Specificity"]],
    0.90366,
    tolerance = 1e-4)
  expect_equal(
    cm[["Pos Pred Value"]],
    0.60317,
    tolerance = 1e-4)
  expect_equal(
    cm[["Neg Pred Value"]],
    0.86207,
    tolerance = 1e-4)
  expect_equal(
    cm[["Kappa"]],
    0.45507,
    tolerance = 1e-4)
  expect_equal(
    cm[["MCC"]],
    0.62823,
    tolerance = 1e-4)
  expect_equal(
    cm[["Detection Rate"]],
    0.25333,
    tolerance = 1e-4)
  expect_equal(
    cm[["Detection Prevalence"]],
    0.33333,
    tolerance = 1e-4)
  expect_equal(
    cm[["Prevalence"]],
    0.33333,
    tolerance = 1e-4)
  # Testing column names
  expect_equal(
    names(cm),
    c("Confusion Matrix", "Table", "Class Level Results", "Overall Accuracy",
      "Balanced Accuracy", "F1", "Sensitivity", "Specificity", "Pos Pred Value",
      "Neg Pred Value", "Kappa", "MCC", "Detection Rate", "Detection Prevalence",
      "Prevalence"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(cm),
    c("list", "list", "list", "numeric", "numeric", "numeric", "numeric",
      "numeric", "numeric", "numeric", "numeric", "numeric", "numeric",
      "numeric", "numeric"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(cm),
    c("list", "list", "list", "double", "double", "double", "double",
      "double", "double", "double", "double", "double", "double",
      "double", "double"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(cm),
    c(1L, 15L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(cm)),
    character(0),
    fixed = TRUE)
  ## Finished testing 'cm'                                                  ####


  cm <- confusion_matrix(preds, targets)
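  # Swapping the arguments mirrors the metrics: Sensitivity and
  # Pos Pred Value trade places, as do Specificity and Neg Pred Value,
  # while symmetric metrics (Overall Accuracy, Kappa, MCC) are unchanged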


  ## Testing 'cm'                                                           ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Testing class
  expect_equal(
    class(cm),
    c("cfm_results", "cfm_multinomial", "tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    cm[["Overall Accuracy"]],
    0.76,
    tolerance = 1e-4)
  expect_equal(
    cm[["Balanced Accuracy"]],
    0.73262,
    tolerance = 1e-4)
  expect_equal(
    cm[["F1"]],
    NaN,
    tolerance = 1e-4)
  expect_equal(
    cm[["Sensitivity"]],
    0.60317,
    tolerance = 1e-4)
  expect_equal(
    cm[["Specificity"]],
    0.86207,
    tolerance = 1e-4)
  expect_equal(
    cm[["Pos Pred Value"]],
    NaN,
    tolerance = 1e-4)
  expect_equal(
    cm[["Neg Pred Value"]],
    0.90366,
    tolerance = 1e-4)
  expect_equal(
    cm[["Kappa"]],
    0.45507,
    tolerance = 1e-4)
  expect_equal(
    cm[["MCC"]],
    0.62823,
    tolerance = 1e-4)
  expect_equal(
    cm[["Detection Rate"]],
    0.25333,
    tolerance = 1e-4)
  expect_equal(
    cm[["Detection Prevalence"]],
    0.33333,
    tolerance = 1e-4)
  expect_equal(
    cm[["Prevalence"]],
    0.33333,
    tolerance = 1e-4)
  # Testing column names
  expect_equal(
    names(cm),
    c("Confusion Matrix", "Table", "Class Level Results", "Overall Accuracy",
      "Balanced Accuracy", "F1", "Sensitivity", "Specificity", "Pos Pred Value",
      "Neg Pred Value", "Kappa", "MCC", "Detection Rate", "Detection Prevalence",
      "Prevalence"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(cm),
    c("list", "list", "list", "numeric", "numeric", "numeric", "numeric",
      "numeric", "numeric", "numeric", "numeric", "numeric", "numeric",
      "numeric", "numeric"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(cm),
    c("list", "list", "list", "double", "double", "double", "double",
      "double", "double", "double", "double", "double", "double",
      "double", "double"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(cm),
    c(1L, 15L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(cm)),
    character(0),
    fixed = TRUE)
  ## Finished testing 'cm'                                                  ####


})

test_that("fuzzing confusion_matrix()", {

  xpectr::set_test_seed(41)
  targets <- sample(0:1, 50, replace = TRUE)
  possible_preds <- sample(0:1, 50, replace = TRUE)
  preds <- ifelse(runif(50) > 0.7, possible_preds, targets)

  # Generate expectations for 'confusion_matrix'
  # Tip: comment out the gxs_function() call
  # so it is easy to regenerate the tests
  xpectr::set_test_seed(42)
  # xpectr::gxs_function(
  #   fn = confusion_matrix,
  #   args_values = list(
  #     "targets" = list(targets, NA, 1, integer(0)),
  #     "predictions" = list(preds, NA, 1, integer(0)),
  #     "metrics" = list(list(), binomial_metrics(accuracy = FALSE), "hej", NA),
  #     "positive" = list(2, 1),
  #     "c_levels" = list(NULL, c(0,1), c("0","1"), c(3,4), c(0,1,2,3)),
  #     "do_one_vs_all" = list(TRUE, FALSE, NA),
  #     "parallel" = list(FALSE, NULL)
  #   ),
  #   indentation = 2
  # )

  ## Testing 'confusion_matrix'                                               ####
  ## Initially generated by xpectr
  # Testing different combinations of argument values

  # Testing confusion_matrix(targets = targets, predicti...
  xpectr::set_test_seed(42)
  # Assigning output
  output_19148 <- confusion_matrix(targets = targets, predictions = preds, metrics = list(), positive = 2, c_levels = NULL, do_one_vs_all = TRUE, parallel = FALSE)
  # Testing class
  expect_equal(
    class(output_19148),
    c("cfm_results", "cfm_binomial", "tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_19148[["Positive Class"]],
    "1",
    fixed = TRUE)
  expect_equal(
    output_19148[["Balanced Accuracy"]],
    0.83512,
    tolerance = 1e-4)
  expect_equal(
    output_19148[["F1"]],
    0.85246,
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Sensitivity"]],
    0.78788,
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Specificity"]],
    0.88235,
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Pos Pred Value"]],
    0.92857,
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Neg Pred Value"]],
    0.68182,
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Kappa"]],
    0.62562,
    tolerance = 1e-4)
  expect_equal(
    output_19148[["MCC"]],
    0.63961,
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Detection Rate"]],
    0.52,
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Detection Prevalence"]],
    0.56,
    tolerance = 1e-4)
  expect_equal(
    output_19148[["Prevalence"]],
    0.66,
    tolerance = 1e-4)
  # Testing column names
  expect_equal(
    names(output_19148),
    c("Confusion Matrix", "Table", "Positive Class", "Balanced Accuracy",
      "F1", "Sensitivity", "Specificity", "Pos Pred Value", "Neg Pred Value",
      "Kappa", "MCC", "Detection Rate", "Detection Prevalence", "Prevalence"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_19148),
    c("list", "list", "character", "numeric", "numeric", "numeric",
      "numeric", "numeric", "numeric", "numeric", "numeric", "numeric",
      "numeric", "numeric"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_19148),
    c("list", "list", "character", "double", "double", "double", "double",
      "double", "double", "double", "double", "double", "double",
      "double"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_19148),
    c(1L, 14L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_19148)),
    character(0),
    fixed = TRUE)

  # Testing confusion_matrix(targets = NA, predictions =...
  # Changed from baseline: targets = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19370 <- xpectr::capture_side_effects(confusion_matrix(targets = NA, predictions = preds, metrics = list(), positive = 2, c_levels = NULL, do_one_vs_all = TRUE, parallel = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19370[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'targets': Contains missing values (element 1)."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19370[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing confusion_matrix(targets = 1, predictions = ...
  # Changed from baseline: targets = 1
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_12861 <- xpectr::capture_side_effects(confusion_matrix(targets = 1, predictions = preds, metrics = list(), positive = 2, c_levels = NULL, do_one_vs_all = TRUE, parallel = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_12861[['error']]),
    xpectr::strip("1 assertions failed:\n * 'targets' and 'predictions' must have same length."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_12861[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing confusion_matrix(targets = integer(0), predi...
  # Changed from baseline: targets = integer(0)
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_18304 <- xpectr::capture_side_effects(confusion_matrix(targets = integer(0), predictions = preds, metrics = list(), positive = 2, c_levels = NULL, do_one_vs_all = TRUE, parallel = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18304[['error']]),
    xpectr::strip("1 assertions failed:\n * 'targets' and 'predictions' must have same length."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_18304[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing confusion_matrix(targets = NULL, predictions...
  # Changed from baseline: targets = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_16417 <- xpectr::capture_side_effects(confusion_matrix(targets = NULL, predictions = preds, metrics = list(), positive = 2, c_levels = NULL, do_one_vs_all = TRUE, parallel = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16417[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'targets': Must be of type 'vector', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16417[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing confusion_matrix(targets = targets, predicti...
  # Changed from baseline: predictions = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_15190 <- xpectr::capture_side_effects(confusion_matrix(targets = targets, predictions = NA, metrics = list(), positive = 2, c_levels = NULL, do_one_vs_all = TRUE, parallel = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_15190[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'predictions': Contains missing values (element 1)."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_15190[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing confusion_matrix(targets = targets, predicti...
  # Changed from baseline: predictions = 1
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_17365 <- xpectr::capture_side_effects(confusion_matrix(targets = targets, predictions = 1, metrics = list(), positive = 2, c_levels = NULL, do_one_vs_all = TRUE, parallel = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_17365[['error']]),
    xpectr::strip("1 assertions failed:\n * 'targets' and 'predictions' must have same length."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_17365[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing confusion_matrix(targets = targets, predicti...
  # Changed from baseline: predictions = integer(0)
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_11346 <- xpectr::capture_side_effects(confusion_matrix(targets = targets, predictions = integer(0), metrics = list(), positive = 2, c_levels = NULL, do_one_vs_all = TRUE, parallel = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_11346[['error']]),
    xpectr::strip("1 assertions failed:\n * 'targets' and 'predictions' must have same length."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_11346[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing confusion_matrix(targets = targets, predicti...
  # Changed from baseline: predictions = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_16569 <- xpectr::capture_side_effects(confusion_matrix(targets = targets, predictions = NULL, metrics = list(), positive = 2, c_levels = NULL, do_one_vs_all = TRUE, parallel = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16569[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'predictions': Must be of type 'vector', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_16569[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing confusion_matrix(targets = targets, predicti...
  # Changed from baseline: metrics = binomial_me...
  xpectr::set_test_seed(42)
  # Assigning output
  output_17050 <- confusion_matrix(targets = targets, predictions = preds, metrics = binomial_metrics(accuracy = FALSE), positive = 2, c_levels = NULL, do_one_vs_all = TRUE, parallel = FALSE)
  # Testing class
  expect_equal(
    class(output_17050),
    c("cfm_results", "cfm_binomial", "tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_17050[["Positive Class"]],
    "1",
    fixed = TRUE)
  expect_equal(
    output_17050[["Balanced Accuracy"]],
    0.83512,
    tolerance = 1e-4)
  expect_equal(
    output_17050[["F1"]],
    0.85246,
    tolerance = 1e-4)
  expect_equal(
    output_17050[["Sensitivity"]],
    0.78788,
    tolerance = 1e-4)
  expect_equal(
    output_17050[["Specificity"]],
    0.88235,
    tolerance = 1e-4)
  expect_equal(
    output_17050[["Pos Pred Value"]],
    0.92857,
    tolerance = 1e-4)
  expect_equal(
    output_17050[["Neg Pred Value"]],
    0.68182,
    tolerance = 1e-4)
  expect_equal(
    output_17050[["Kappa"]],
    0.62562,
    tolerance = 1e-4)
  expect_equal(
    output_17050[["MCC"]],
    0.63961,
    tolerance = 1e-4)
  expect_equal(
    output_17050[["Detection Rate"]],
    0.52,
    tolerance = 1e-4)
  expect_equal(
    output_17050[["Detection Prevalence"]],
    0.56,
    tolerance = 1e-4)
  expect_equal(
    output_17050[["Prevalence"]],
    0.66,
    tolerance = 1e-4)
  # Testing column names
  expect_equal(
    names(output_17050),
    c("Confusion Matrix", "Table", "Positive Class", "Balanced Accuracy",
      "F1", "Sensitivity", "Specificity", "Pos Pred Value", "Neg Pred Value",
      "Kappa", "MCC", "Detection Rate", "Detection Prevalence", "Prevalence"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_17050),
    c("list", "list", "character", "numeric", "numeric", "numeric",
      "numeric", "numeric", "numeric", "numeric", "numeric", "numeric",
      "numeric", "numeric"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_17050),
    c("list", "list", "character", "double", "double", "double", "double",
      "double", "double", "double", "double", "double", "double",
      "double"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_17050),
    c(1L, 14L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_17050)),
    character(0),
    fixed = TRUE)

  # Testing confusion_matrix(targets = targets, predicti...
  # Changed from baseline: metrics = "hej"
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_14577 <- xpectr::capture_side_effects(confusion_matrix(targets = targets, predictions = preds, metrics = "hej", positive = 2, c_levels = NULL, do_one_vs_all = TRUE, parallel = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14577[['error']]),
    xpectr::strip("1 assertions failed:\n * 'metrics' contained unknown metric name: hej."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14577[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing confusion_matrix(targets = targets, predicti...
  # Changed from baseline: metrics = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_17191 <- xpectr::capture_side_effects(confusion_matrix(targets = targets, predictions = preds, metrics = NA, positive = 2, c_levels = NULL, do_one_vs_all = TRUE, parallel = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_17191[['error']]),
    xpectr::strip("Assertion failed. One of the following must apply:\n * checkmate::check_list(metrics): Must be of type 'list', not 'logical'\n * checkmate::check_character(metrics): Contains missing values (element 1)"),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_17191[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing confusion_matrix(targets = targets, predicti...
  # Changed from baseline: metrics = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19346 <- xpectr::capture_side_effects(confusion_matrix(targets = targets, predictions = preds, metrics = NULL, positive = 2, c_levels = NULL, do_one_vs_all = TRUE, parallel = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19346[['error']]),
    xpectr::strip("Assertion failed. One of the following must apply:\n * checkmate::check_list(metrics): Must be of type 'list', not 'NULL'\n * checkmate::check_character(metrics): Must be of type 'character', not 'NULL'"),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19346[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing confusion_matrix(targets = targets, predicti...
  # Changed from baseline: positive = 1
  xpectr::set_test_seed(42)
  # Assigning output
  output_12554 <- confusion_matrix(targets = targets, predictions = preds, metrics = list(), positive = 1, c_levels = NULL, do_one_vs_all = TRUE, parallel = FALSE)
  # Testing class
  expect_equal(
    class(output_12554),
    c("cfm_results", "cfm_binomial", "tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_12554[["Positive Class"]],
    "0",
    fixed = TRUE)
  expect_equal(
    output_12554[["Balanced Accuracy"]],
    0.83512,
    tolerance = 1e-4)
  expect_equal(
    output_12554[["F1"]],
    0.76923,
    tolerance = 1e-4)
  expect_equal(
    output_12554[["Sensitivity"]],
    0.88235,
    tolerance = 1e-4)
  expect_equal(
    output_12554[["Specificity"]],
    0.78788,
    tolerance = 1e-4)
  expect_equal(
    output_12554[["Pos Pred Value"]],
    0.68182,
    tolerance = 1e-4)
  expect_equal(
    output_12554[["Neg Pred Value"]],
    0.92857,
    tolerance = 1e-4)
  expect_equal(
    output_12554[["Kappa"]],
    0.62562,
    tolerance = 1e-4)
  expect_equal(
    output_12554[["MCC"]],
    0.63961,
    tolerance = 1e-4)
  expect_equal(
    output_12554[["Detection Rate"]],
    0.3,
    tolerance = 1e-4)
  expect_equal(
    output_12554[["Detection Prevalence"]],
    0.44,
    tolerance = 1e-4)
  expect_equal(
    output_12554[["Prevalence"]],
    0.34,
    tolerance = 1e-4)
  # Testing column names
  expect_equal(
    names(output_12554),
    c("Confusion Matrix", "Table", "Positive Class", "Balanced Accuracy",
      "F1", "Sensitivity", "Specificity", "Pos Pred Value", "Neg Pred Value",
      "Kappa", "MCC", "Detection Rate", "Detection Prevalence", "Prevalence"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_12554),
    c("list", "list", "character", "numeric", "numeric", "numeric",
      "numeric", "numeric", "numeric", "numeric", "numeric", "numeric",
      "numeric", "numeric"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_12554),
    c("list", "list", "character", "double", "double", "double", "double",
      "double", "double", "double", "double", "double", "double",
      "double"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_12554),
    c(1L, 14L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_12554)),
    character(0),
    fixed = TRUE)

  # Testing confusion_matrix(targets = targets, predicti...
  # Changed from baseline: positive = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_14622 <- xpectr::capture_side_effects(confusion_matrix(targets = targets, predictions = preds, metrics = list(), positive = NULL, c_levels = NULL, do_one_vs_all = TRUE, parallel = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14622[['error']]),
    xpectr::strip("Assertion failed. One of the following must apply:\n * checkmate::check_choice(positive): Must be a subset of {'1','2'}, not 'NULL'\n * checkmate::check_string(positive): Must be of type 'string', not 'NULL'"),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_14622[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing confusion_matrix(targets = targets, predicti...
  # Changed from baseline: c_levels = c(0, 1)
  xpectr::set_test_seed(42)
  # Assigning output
  output_19400 <- confusion_matrix(targets = targets, predictions = preds, metrics = list(), positive = 2, c_levels = c(0, 1), do_one_vs_all = TRUE, parallel = FALSE)
  # Testing class
  expect_equal(
    class(output_19400),
    c("cfm_results", "cfm_binomial", "tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_19400[["Positive Class"]],
    "1",
    fixed = TRUE)
  expect_equal(
    output_19400[["Balanced Accuracy"]],
    0.83512,
    tolerance = 1e-4)
  expect_equal(
    output_19400[["F1"]],
    0.85246,
    tolerance = 1e-4)
  expect_equal(
    output_19400[["Sensitivity"]],
    0.78788,
    tolerance = 1e-4)
  expect_equal(
    output_19400[["Specificity"]],
    0.88235,
    tolerance = 1e-4)
  expect_equal(
    output_19400[["Pos Pred Value"]],
    0.92857,
    tolerance = 1e-4)
  expect_equal(
    output_19400[["Neg Pred Value"]],
    0.68182,
    tolerance = 1e-4)
  expect_equal(
    output_19400[["Kappa"]],
    0.62562,
    tolerance = 1e-4)
  expect_equal(
    output_19400[["MCC"]],
    0.63961,
    tolerance = 1e-4)
  expect_equal(
    output_19400[["Detection Rate"]],
    0.52,
    tolerance = 1e-4)
  expect_equal(
    output_19400[["Detection Prevalence"]],
    0.56,
    tolerance = 1e-4)
  expect_equal(
    output_19400[["Prevalence"]],
    0.66,
    tolerance = 1e-4)
  # Testing column names
  expect_equal(
    names(output_19400),
    c("Confusion Matrix", "Table", "Positive Class", "Balanced Accuracy",
      "F1", "Sensitivity", "Specificity", "Pos Pred Value", "Neg Pred Value",
      "Kappa", "MCC", "Detection Rate", "Detection Prevalence", "Prevalence"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_19400),
    c("list", "list", "character", "numeric", "numeric", "numeric",
      "numeric", "numeric", "numeric", "numeric", "numeric", "numeric",
      "numeric", "numeric"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_19400),
    c("list", "list", "character", "double", "double", "double", "double",
      "double", "double", "double", "double", "double", "double",
      "double"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_19400),
    c(1L, 14L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_19400)),
    character(0),
    fixed = TRUE)

  # Testing confusion_matrix(targets = targets, predicti...
  # Changed from baseline: c_levels = c("0", "1")
  xpectr::set_test_seed(42)
  # Assigning output
  output_19782 <- confusion_matrix(targets = targets, predictions = preds, metrics = list(), positive = 2, c_levels = c("0", "1"), do_one_vs_all = TRUE, parallel = FALSE)
  # Testing class
  expect_equal(
    class(output_19782),
    c("cfm_results", "cfm_binomial", "tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_19782[["Positive Class"]],
    "1",
    fixed = TRUE)
  expect_equal(
    output_19782[["Balanced Accuracy"]],
    0.83512,
    tolerance = 1e-4)
  expect_equal(
    output_19782[["F1"]],
    0.85246,
    tolerance = 1e-4)
  expect_equal(
    output_19782[["Sensitivity"]],
    0.78788,
    tolerance = 1e-4)
  expect_equal(
    output_19782[["Specificity"]],
    0.88235,
    tolerance = 1e-4)
  expect_equal(
    output_19782[["Pos Pred Value"]],
    0.92857,
    tolerance = 1e-4)
  expect_equal(
    output_19782[["Neg Pred Value"]],
    0.68182,
    tolerance = 1e-4)
  expect_equal(
    output_19782[["Kappa"]],
    0.62562,
    tolerance = 1e-4)
  expect_equal(
    output_19782[["MCC"]],
    0.63961,
    tolerance = 1e-4)
  expect_equal(
    output_19782[["Detection Rate"]],
    0.52,
    tolerance = 1e-4)
  expect_equal(
    output_19782[["Detection Prevalence"]],
    0.56,
    tolerance = 1e-4)
  expect_equal(
    output_19782[["Prevalence"]],
    0.66,
    tolerance = 1e-4)
  # Testing column names
  expect_equal(
    names(output_19782),
    c("Confusion Matrix", "Table", "Positive Class", "Balanced Accuracy",
      "F1", "Sensitivity", "Specificity", "Pos Pred Value", "Neg Pred Value",
      "Kappa", "MCC", "Detection Rate", "Detection Prevalence", "Prevalence"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_19782),
    c("list", "list", "character", "numeric", "numeric", "numeric",
      "numeric", "numeric", "numeric", "numeric", "numeric", "numeric",
      "numeric", "numeric"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_19782),
    c("list", "list", "character", "double", "double", "double", "double",
      "double", "double", "double", "double", "double", "double",
      "double"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_19782),
    c(1L, 14L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_19782)),
    character(0),
    fixed = TRUE)

  # Testing confusion_matrix(targets = targets, predicti...
  # Changed from baseline: c_levels = c(3, 4)
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_11174 <- xpectr::capture_side_effects(confusion_matrix(targets = targets, predictions = preds, metrics = list(), positive = 2, c_levels = c(3, 4), do_one_vs_all = TRUE, parallel = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_11174[['error']]),
    xpectr::strip("1 assertions failed:\n * 'c_levels' does not contain all the levels in 'predictions' and 'targets'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_11174[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing confusion_matrix(targets = targets, predicti...
  # Changed from baseline: c_levels = c(0, 1, 2, 3)
  xpectr::set_test_seed(42)
  # Assigning output
  output_14749 <- confusion_matrix(targets = targets, predictions = preds, metrics = list(), positive = 2, c_levels = c(0, 1, 2, 3), do_one_vs_all = TRUE, parallel = FALSE)
  # Testing class
  expect_equal(
    class(output_14749),
    c("cfm_results", "cfm_multinomial", "tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_14749[["Overall Accuracy"]],
    0.82,
    tolerance = 1e-4)
  expect_equal(
    output_14749[["Balanced Accuracy"]],
    NaN,
    tolerance = 1e-4)
  expect_equal(
    output_14749[["F1"]],
    NaN,
    tolerance = 1e-4)
  expect_equal(
    output_14749[["Sensitivity"]],
    NaN,
    tolerance = 1e-4)
  expect_equal(
    output_14749[["Specificity"]],
    0.91756,
    tolerance = 1e-4)
  expect_equal(
    output_14749[["Pos Pred Value"]],
    NaN,
    tolerance = 1e-4)
  expect_equal(
    output_14749[["Neg Pred Value"]],
    0.9026,
    tolerance = 1e-4)
  expect_equal(
    output_14749[["Kappa"]],
    NaN,
    tolerance = 1e-4)
  expect_equal(
    output_14749[["MCC"]],
    0.63961,
    tolerance = 1e-4)
  expect_equal(
    output_14749[["Detection Rate"]],
    0.205,
    tolerance = 1e-4)
  expect_equal(
    output_14749[["Detection Prevalence"]],
    0.25,
    tolerance = 1e-4)
  expect_equal(
    output_14749[["Prevalence"]],
    0.25,
    tolerance = 1e-4)
  # Testing column names
  expect_equal(
    names(output_14749),
    c("Confusion Matrix", "Table", "Class Level Results", "Overall Accuracy",
      "Balanced Accuracy", "F1", "Sensitivity", "Specificity", "Pos Pred Value",
      "Neg Pred Value", "Kappa", "MCC", "Detection Rate", "Detection Prevalence",
      "Prevalence"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_14749),
    c("list", "list", "list", "numeric", "numeric", "numeric", "numeric",
      "numeric", "numeric", "numeric", "numeric", "numeric", "numeric",
      "numeric", "numeric"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_14749),
    c("list", "list", "list", "double", "double", "double", "double",
      "double", "double", "double", "double", "double", "double",
      "double", "double"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_14749),
    c(1L, 15L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_14749)),
    character(0),
    fixed = TRUE)

  # Testing confusion_matrix(targets = targets, predicti...
  # Changed from baseline: do_one_vs_all = FALSE
  xpectr::set_test_seed(42)
  # Assigning output
  output_15603 <- confusion_matrix(targets = targets, predictions = preds, metrics = list(), positive = 2, c_levels = NULL, do_one_vs_all = FALSE, parallel = FALSE)
  # Testing class
  expect_equal(
    class(output_15603),
    c("cfm_results", "cfm_binomial", "tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_15603[["Positive Class"]],
    "1",
    fixed = TRUE)
  expect_equal(
    output_15603[["Balanced Accuracy"]],
    0.83512,
    tolerance = 1e-4)
  expect_equal(
    output_15603[["F1"]],
    0.85246,
    tolerance = 1e-4)
  expect_equal(
    output_15603[["Sensitivity"]],
    0.78788,
    tolerance = 1e-4)
  expect_equal(
    output_15603[["Specificity"]],
    0.88235,
    tolerance = 1e-4)
  expect_equal(
    output_15603[["Pos Pred Value"]],
    0.92857,
    tolerance = 1e-4)
  expect_equal(
    output_15603[["Neg Pred Value"]],
    0.68182,
    tolerance = 1e-4)
  expect_equal(
    output_15603[["Kappa"]],
    0.62562,
    tolerance = 1e-4)
  expect_equal(
    output_15603[["MCC"]],
    0.63961,
    tolerance = 1e-4)
  expect_equal(
    output_15603[["Detection Rate"]],
    0.52,
    tolerance = 1e-4)
  expect_equal(
    output_15603[["Detection Prevalence"]],
    0.56,
    tolerance = 1e-4)
  expect_equal(
    output_15603[["Prevalence"]],
    0.66,
    tolerance = 1e-4)
  # Testing column names
  expect_equal(
    names(output_15603),
    c("Confusion Matrix", "Table", "Positive Class", "Balanced Accuracy",
      "F1", "Sensitivity", "Specificity", "Pos Pred Value", "Neg Pred Value",
      "Kappa", "MCC", "Detection Rate", "Detection Prevalence", "Prevalence"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_15603),
    c("list", "list", "character", "numeric", "numeric", "numeric",
      "numeric", "numeric", "numeric", "numeric", "numeric", "numeric",
      "numeric", "numeric"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_15603),
    c("list", "list", "character", "double", "double", "double", "double",
      "double", "double", "double", "double", "double", "double",
      "double"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_15603),
    c(1L, 14L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_15603)),
    character(0),
    fixed = TRUE)

  # Testing confusion_matrix(targets = targets, predicti...
  # Changed from baseline: do_one_vs_all = NA
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19040 <- xpectr::capture_side_effects(confusion_matrix(targets = targets, predictions = preds, metrics = list(), positive = 2, c_levels = NULL, do_one_vs_all = NA, parallel = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19040[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'do_one_vs_all': May not be NA."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19040[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing confusion_matrix(targets = targets, predicti...
  # Changed from baseline: do_one_vs_all = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_11387 <- xpectr::capture_side_effects(confusion_matrix(targets = targets, predictions = preds, metrics = list(), positive = 2, c_levels = NULL, do_one_vs_all = NULL, parallel = FALSE), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_11387[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'do_one_vs_all': Must be of type 'logical flag', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_11387[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  # Testing confusion_matrix(targets = targets, predicti...
  # Changed from baseline: parallel = NULL
  xpectr::set_test_seed(42)
  # Testing side effects
  # Assigning side effects
  side_effects_19888 <- xpectr::capture_side_effects(confusion_matrix(targets = targets, predictions = preds, metrics = list(), positive = 2, c_levels = NULL, do_one_vs_all = TRUE, parallel = NULL), reset_seed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19888[['error']]),
    xpectr::strip("1 assertions failed:\n * Variable 'parallel': Must be of type 'logical flag', not 'NULL'."),
    fixed = TRUE)
  expect_equal(
    xpectr::strip(side_effects_19888[['error_class']]),
    xpectr::strip(c("simpleError", "error", "condition")),
    fixed = TRUE)

  ## Finished testing 'confusion_matrix'                                      ####

})
