R/summary_caiser.R

#' summary.CAISEr
#'
#' S3 method for summarizing _CAISEr_ objects (output by [run_experiment()]).
#' Input parameters `test`, `alternative` and `sig.level` can be used to
#' override the ones used in the call to [run_experiment()].
#'
#' @inheritParams calc_nreps
#' @inheritParams calc_instances
#' @inheritParams run_experiment
#' @param object list object of class _CAISEr_
#'                     (generated by [run_experiment()])
#' @param ... other parameters to be passed down to specific
#'            summary functions (currently unused)
#'
#' @return A list object is returned invisibly, containing the details of all
#'         tests performed as well as information on the total number of runs
#'         dedicated to each algorithm.
#' @examples
#' # Example using four dummy algorithms and 100 dummy instances.
#' # See [dummyalgo()] and [dummyinstance()] for details.
#' # Generating 4 dummy algorithms here, with means 15, 10, 30, 15 and standard
#' # deviations 2, 4, 6, 8.
#' algorithms <- mapply(FUN = function(i, m, s){
#'   list(FUN   = "dummyalgo",
#'        alias = paste0("algo", i),
#'        distribution.fun  = "rnorm",
#'        distribution.pars = list(mean = m, sd = s))},
#'   i = c(alg1 = 1, alg2 = 2, alg3 = 3, alg4 = 4),
#'   m = c(15, 10, 30, 15),
#'   s = c(2, 4, 6, 8),
#'   SIMPLIFY = FALSE)
#'
#' # Generate 100 dummy instances with centered exponential distributions
#' instances <- lapply(1:100,
#'                     function(i) {rate <- runif(1, 1, 10)
#'                                  list(FUN   = "dummyinstance",
#'                                       alias = paste0("Inst.", i),
#'                                       distr = "rexp", rate = rate,
#'                                       bias  = -1 / rate)})
#'
#' my.results <- run_experiment(instances, algorithms,
#'                              d = 1, se.max = .1,
#'                              power = .9, sig.level = .05,
#'                              power.target = "mean",
#'                              dif = "perc", comparisons = "all.vs.all",
#'                              seed = 1234, ncpus = 1)
#' summary(my.results)
#'
#' # You can override some defaults if you want:
#' summary(my.results, test = "wilcoxon")
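#'
#' # The summary also returns (invisibly) a list with the full test details,
#' # which can be captured for further processing. A minimal sketch built on
#' # the structure of that list (test.info, algoruns, algonames, algopairs):
#' summ <- summary(my.results)
#' data.frame(comparison = sapply(summ$test.info, function(x) x$comparison),
#'            p.value    = sapply(summ$test.info, function(x) x$pval),
#'            d          = sapply(summ$test.info, function(x) x$d))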
#'
#' @method summary CAISEr
#'
#' @export
#'
summary.CAISEr <- function(object, test = NULL,
                           alternative = NULL,
                           sig.level = NULL,
                           ...)
{

  # Standard value assignment and error checking
  if (is.null(test)) test <- object$Configuration$test
  if (is.null(alternative)) alternative <- object$Configuration$alternative
  if (is.null(sig.level)) sig.level <- object$Configuration$sig.level

  assertthat::assert_that("CAISEr" %in% class(object),
                          is.character(test), length(test) == 1,
                          test %in% c("t.test", "wilcoxon", "binomial"),
                          is.character(alternative), length(alternative) == 1,
                          alternative %in% c("less", "greater", "two.sided"),
                          is.numeric(sig.level), length(sig.level) == 1,
                          sig.level > 0, sig.level < 1)


  # ===========================================================================
  algonames <- as.character(unique(object$data.raw$Algorithm))
  algoruns  <- as.numeric(table(object$data.raw$Algorithm))
  algopairs <- paste(object$data.summary$Alg1,
                     object$data.summary$Alg2,
                     sep = " x ")

  # perform initial tests just to calculate p-values
  # (ignoring significance correction)
  my.tests <- vector(mode = "list", length = length(unique(algopairs)))
  for (i in seq_along(unique(algopairs))){
    tmp <- object$data.summary[algopairs == unique(algopairs)[i], ]
    my.tests[[i]]$comparison <- unique(algopairs)[i]
    my.tests[[i]]$data <- tmp
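    # Standardized effect size: mean of the paired differences (Phi) divided
    # by their standard deviation (a Cohen's d-type estimate).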
    my.tests[[i]]$d <- mean(tmp$Phi) / stats::sd(tmp$Phi)

    if (test == "t.test"){
      my.tests[[i]]$test <- stats::t.test(tmp$Phi,
                                   conf.level = 1 - sig.level,
                                   alternative = alternative)

    } else if (test == "wilcoxon"){
      my.tests[[i]]$test <- stats::wilcox.test(tmp$Phi,
                                        conf.level = 1 - sig.level,
                                        alternative = alternative)

    } else if (test == "binomial"){
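      # Sign test via binom.test: ties (Phi == 0) are removed, and the number
      # of strictly positive differences is tested against prob = 0.5.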
      x <- tmp$Phi[tmp$Phi != 0]
      n <- length(x)
      x <- sum(x > 0)
      my.tests[[i]]$test <- stats::binom.test(x, n,
                                       conf.level = 1 - sig.level,
                                       alternative = alternative)

    } else stop("Test ", test, " not recognised in function summary.CAISEr")

    my.tests[[i]]$pval <- my.tests[[i]]$test$p.value
  }

  # Reorder the tests in increasing order of p-values
  my.tests <- my.tests[order(sapply(my.tests, function(x) x$pval))]

  # Re-evaluate tests based on corrected significance values
  alpha <- object$samplesize.calc$sig.level
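  # Holm's step-down procedure: the sorted p-values are compared against the
  # corrected significance levels in 'alpha', which are assumed here to be
  # returned by the sample-size calculations (object$samplesize.calc) already
  # ordered to match the increasing p-values.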
  for (i in seq_along(my.tests)){
    if (test == "t.test"){
      my.tests[[i]]$test <- stats::t.test(my.tests[[i]]$data$Phi,
                                   conf.level = 1 - alpha[i],
                                   alternative = alternative)

    } else if (test == "wilcoxon"){
      my.tests[[i]]$test <- stats::wilcox.test(my.tests[[i]]$data$Phi,
                                        conf.level = 1 - alpha[i],
                                        alternative = alternative,
                                        conf.int = TRUE)

    } else if (test == "binomial"){
      x <- my.tests[[i]]$data$Phi[my.tests[[i]]$data$Phi != 0]
      n <- length(x)
      x <- sum(x > 0)
      my.tests[[i]]$test <- stats::binom.test(x, n,
                                       conf.level = 1 - alpha[i],
                                       alternative = alternative)
    }

    my.tests[[i]]$pval <- my.tests[[i]]$test$p.value
  }


  # Print summary
  cat("#====================================")
  cat("\n CAISEr object:")
  cat("\n Number of instances sampled:", object$N)
  cat("\n Number of instances required:", object$N.star)
  cat("\n Adequate power:", !object$Underpowered)
  for (i in seq_along(algonames)){
    cat("\n Total runs of", algonames[i], ":", algoruns[i])
  }
  cat("\n#====================================")
  cat("\n Pairwise comparisons of interest:")
  cat("\n Test:", test)
  cat("\n H1:", alternative)
  cat("\n Comparisons:", object$Configuration$comparisons)
  cat("\n Alpha (FWER):", sig.level)
  cat("\n Power target:", object$Configuration$power.target)
  cat("\n Desired power:", object$Configuration$power)
  cat("\n#====================================")
  cat("\nTests using Holm's step-down procedure:")
  stflag <- FALSE
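  # Under Holm's procedure, rejection of H0 stops at the first test whose
  # p-value exceeds its corrected alpha; all subsequent tests are reported
  # but not declared significant.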
  for (i in seq_along(my.tests)){
    if (!stflag && (my.tests[[i]]$pval > alpha[i])){
      cat("\n\n ----- Stop rejecting H0 at this point -----\n")
      stflag <- TRUE
    } else cat("\n")
    cat("\n Test", i, ":", my.tests[[i]]$comparison)
    cat("\n H0:", switch(test,
                         t.test = "mean = 0",
                         wilcoxon = "median = 0",
                         binomial = "prob = 0.5"))
    cat("\n alpha\t\t=", signif(alpha[i], 4),
        "\n p-value\t=", signif(my.tests[[i]]$pval, 4))
    cat(paste0("\n Est. ", switch(test,
                                  t.test = "mean",
                                  wilcoxon = "median",
                                  binomial = "prob"),
        "\t="), signif(my.tests[[i]]$test$estimate, 4))
    cat("\n CI{1-alpha}\t= [", signif(my.tests[[i]]$test$conf.int, 4), "]")
    cat("\n d\t\t=", my.tests[[i]]$d)
  }
  cat("\n#====================================")

  # Return invisibly
  invisible(list(test.info = my.tests,
                 algoruns  = algoruns,
                 algonames = algonames,
                 algopairs = algopairs))
}