R/utilities.R


# examples =======

#' Example files
#'
#' @description The isoprocessor package comes with a few example data sets to make it easy to illustrate its functionality.
#'
#' @details \code{iso_get_processor_example}: retrieve the path to an isoprocessor example data set
#' @param filename the name of the example file for which to retrieve the system path
#' @export
iso_get_processor_example <- function(filename) {
  filepath <- system.file(package = "isoprocessor", "extdata", filename)
  if(!file.exists(filepath))
    sprintf("The example file '%s' does not exist. Please use iso_get_reader_examples() to see a list of all available example files.", filename) %>%
    stop(call. = FALSE)
  return(filepath)
}
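
# Usage sketch (illustrative, not run; the file name below is hypothetical - use
# iso_get_processor_examples() to see which files actually ship with the package):
# path <- iso_get_processor_example("dual_inlet_example.did")
# file.exists(path)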

#' @rdname iso_get_processor_example
#' @details \code{iso_get_processor_examples}: list all available isoprocessor example data sets
#' @export
iso_get_processor_examples <- function() {
  # global vars

  extension <- filename <- format <- NULL
  file_types <- isoreader::iso_get_supported_file_types()
  iso_expand_paths(
    ".", extensions = file_types$extension, root = system.file(package = "isoprocessor", "extdata")) %>%
    mutate(filename = basename(path)) %>%
    isoreader:::match_to_supported_file_types(file_types) %>%
    arrange(type, extension, filename) %>%
    select(filename, type, description)
}
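
# Usage sketch (illustrative, not run): lists all bundled example files together with
# their file type and a short description
# iso_get_processor_examples()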

# general helper functions ========

#' @export
magrittr::`%>%`

# check if a column is in a data frame (copied from isoreader)
col_in_df <- function(df, col) {
  stopifnot(is.data.frame(df))
  col %in% names(df)
}

# re-exported because it is a very confusing error if base R's `filter` is used instead of dplyr's
#' @export
dplyr::filter

# because it is super helpful for dealing with the isoprocessor nested data types
#' @export
tidyr::unnest

# collapse helper to deal with the naming change in the glue package (collapse() was renamed to glue_collapse())
collapse <- function(...) {
  if (exists("glue_collapse", where=asNamespace("glue"), mode="function"))
    glue::glue_collapse(...)
  else
    glue::collapse(...)
}
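
# Usage sketch (illustrative, not run): regardless of the installed glue version this
# behaves like glue::glue_collapse(), e.g.
# collapse(c("a", "b", "c"), sep = ", ", last = " and ")  # -> "a, b and c"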

# text formatting

# has migrated to isoreader
#' @export
isoreader::iso_format

# information display ====

#' Print data table
#'
#' This function is DEPRECATED and will be removed in a future version.
#'
#' @param dt data table
#' @param select which columns to select (use c(...) to select multiple), supports all \link[dplyr]{select} syntax
#' @param filter any filter conditions to apply, by default does not filter any
#' @param print_func what function to use for printing (makes it easier to switch between different types of output)
#' @param title whether to provide a title message
#' @param unique whether to print only unique rows
#' @inheritParams iso_show_default_processor_parameters
#' @return the passed in data table (for piping)
#' @export
#' @note this is not working as well as planned - consider removing it entirely or reducing it to a simple convenience function
iso_print_data_table <- function(dt, select = everything(), filter = TRUE, print_func = default(print_func), title = NULL, unique = TRUE, ...) {

  stop("iso_print_data_table is deprecated because of confusing behaviour. Please print the relevant information directly to console.", call. = FALSE)

}

#' Deprecated alias for \code{iso_summarize_data_table}
#' @param ... passed on to \code{iso_summarize_data_table}
#' @export
iso_generate_summary_table <- function(...) {
  warning("this function was renamed --> calling iso_summarize_data_table() instead",
          immediate. = TRUE, call. = FALSE)
  iso_summarize_data_table(...)
}

# nesting ======

# @note: consider deprecating and replacing with direct nest command (nest now supports the dplyr select semantics just as well)
# nest data based on the grouping
# this is basically just an inverse version (in terms of selection) of \link[tidyr]{nest} that can handle the sophisticated selection parameters of select
# @param group_by what to keep out of the nested data (i.e. what to group by), by default nests everything
nest_data <- function(dt, group_by = NULL, nested_data = nested_data) {

  # safety checks and column matching
  if (missing(dt)) stop("no data table supplied", call. = FALSE)
  dt_cols <- get_column_names(dt, group_by = enquo(group_by), n_reqs = list(group_by = "*"))
  nested_col <- resolve_defaults(enquo(nested_data))

  # perform the nest
  dt %>%
    as_tibble() %>% # nest requires tbl
    nest(!!nested_col := -!!dt_cols$group_by)
}
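
# Usage sketch (illustrative, not run): keep 'cyl' as the only grouping column and nest
# everything else into a 'nested_data' list column - roughly equivalent to
# tidyr::nest(dplyr::as_tibble(mtcars), nested_data = -cyl)
# nest_data(mtcars, group_by = cyl)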

#' Remove nested data
#'
#' Convenience function to remove nested columns in the data table (e.g. in preparation for printing to console or RMarkdown).
#' @inheritParams iso_prepare_for_calibration
#' @note not unit tested
#' @export
iso_remove_list_columns <- function(dt) {

  if (missing(dt)) stop("no data table supplied", call. = FALSE)

  list_cols <- dt %>% map_lgl(is_list)
  dt[!list_cols]
}
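
# Usage sketch (illustrative, not run; 'calibs' is a hypothetical data table with nested
# list columns): drop all list columns before printing
# calibs %>% iso_remove_list_columns() %>% knitr::kable()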

# note - documented but not exported
#' unnest parts of a data frame without losing the rest
#'
#' note that this will lead to row duplication if the unnested variables have multiple entries per row of the \code{dt} data frame
#' also note that this will remove rows that have NULL in the nested_data column
#' @param select which columns to unnest - use \code{c(...)} to select multiple, supports all \link[dplyr]{select} syntax including renaming columns. Includes all columns by default (i.e. unnests an entire nested data frame).
#' @param nested_data which column to unnest the \code{select} from
#' @param keep_remaining_nested_data whether to keep any remaining parts of the partially unnested data (irrelevant if \code{select = everything()})
#' @param keep_other_list_data keep other list data columns (e.g. other data or model columns)
unnest_select_data <- function(dt, select = everything(), nested_data = nested_data, keep_remaining_nested_data = TRUE, keep_other_list_data = TRUE, keep_only_unique = TRUE) {
  # safety checks and column matching
  if (missing(dt)) stop("no data table supplied", call. = FALSE)
  dt_cols <- get_column_names(dt, nested_data = enquo(nested_data), type_reqs = list(nested_data = "list"))

  # columns before the nested data
  original_cols <- names(dt)
  before_nd_cols <- tidyselect::vars_select(original_cols, 1:!!sym(dt_cols$nested_data))

  # add row number and remove NULL columns
  dt <-
    dt %>%
    filter(!map_lgl(!!sym(dt_cols$nested_data), is.null)) %>%
    mutate(..row.. = row_number()) %>%
    as_tibble()

  # keep track of the different types of columns
  list_cols <- dt %>% map_lgl(is_list) %>% { names(.)[.] } %>% { .[.!=dt_cols$nested_data] }
  regular_cols <- setdiff(names(dt), c(list_cols, dt_cols$nested_data))

  # only unnest the main list column
  unnested_dt <- unnest(dt[c("..row..", dt_cols$nested_data)], !!sym(dt_cols$nested_data))

  # safety check on whether the select columns exist in the unnested df
  select_cols <- get_column_names(unnested_dt, select = enquo(select), n_reqs = list(select = "*"))$select

  # remove ..row.. from select cols
  select_cols <- select_cols[select_cols != "..row.."]

  # rename
  unnested_dt <- dplyr::rename(unnested_dt, !!!select_cols)
  select_cols <- names(select_cols)

  # check if there are naming conflicts between the select_cols and existing columns
  if (length(overlap <- intersect(regular_cols, select_cols)) > 0) {
    glue::glue(
      "some newly unnested columns have conflicting name(s) with existing ",
      "columns in the data frame and will be omitted from the unnested ",
      "data frame. Please rename these columns in the select statement to ",
      "preserve them: '{paste(overlap, collapse = \"', '\")}'") %>%
      warning(immediate. = TRUE, call. = FALSE)
    select_cols <- setdiff(select_cols, regular_cols)
  }

  # renest without the selected parameters
  keep_cols <- c("..row..", select_cols) %>% unique()
  if (length(setdiff(names(unnested_dt), keep_cols)) > 0 && keep_remaining_nested_data) {
    # renest if un-nesting is incomplete (i.e. data remains) and remaining data should be kept
    renested_dt <- unnested_dt %>% nest_data(group_by = !!keep_cols, nested_data = !!sym(dt_cols$nested_data))
    renested <- TRUE
  } else {
    renested_dt <- unnested_dt[keep_cols]
    renested <- FALSE
  }

  # merge back with the original data frame
  renested_dt <- dt[regular_cols] %>% right_join(renested_dt, by = "..row..")

  # merge the extra list columns back in (easier this way with the renest than using unnest for this)
  if (keep_other_list_data && length(list_cols) > 0) {
    renested_dt <- left_join(renested_dt, dt[c("..row..", list_cols)], by = "..row..")
  }

  # remaining before_nd_cols and new cols
  before_nd_cols <- intersect(before_nd_cols, names(renested_dt))
  new_cols <- setdiff(names(renested_dt), original_cols) %>% setdiff(before_nd_cols)
  if (dt_cols$nested_data %in% before_nd_cols) before_nd_cols <- head(before_nd_cols, -1)
  if (renested) new_cols <- c(new_cols, dt_cols$nested_data)

  # return
  renested_dt %>%
    # make sure there is no row replication when only part of the nested data frame is unnested and rows are duplicated despite remaining unique
    # includes ..row.. on purpose to make sure no unanticipated row collapse is possible
    unique() %>%
    # reconstruct order
    dplyr::select(!!!c(before_nd_cols, new_cols), everything()) %>%
    # remove the ..row.. again (just used for ID purposes)
    dplyr::select(-..row..)
}
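
# Usage sketch (illustrative, not run; 'calibs' and the column names are hypothetical):
# pull 'file_id' and 'area' out of the nested 'model_data' column while keeping the
# remaining nested data and other list columns intact
# calibs %>% unnest_select_data(select = c(file_id, area), nested_data = model_data)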

# note - documented but not exported
#' Unnest model results column
#'
#' Convenience function for unnesting model columns (supports regressions stored either nested or unnested - see \link{run_regression} for details).
#'
#' @param model_column name of the model column to unnest
#' @param nested_model whether the model is nested, if TRUE, must also provide \code{model_params}
#' @param model_params name of the model params column that holds all the other model columns (if \code{nested_model = TRUE})
#' @inheritParams unnest_select_data
unnest_model_column <- function(dt, model_column, model_params = model_params, nested_model = FALSE,
                                 select = everything(),
                                 keep_remaining_nested_data = FALSE, keep_other_list_data = FALSE) {

  # safety checks
  if (missing(model_column)) stop("specify which model column to unnest", call. = FALSE)

  # deal with nested scenarios
  if (nested_model) {
    dt_cols <- get_column_names(dt, model_params = enquo(model_params), type_reqs = list(model_params = "list"))

    # unnest model params
    original_cols <- names(dt)
    dt <- unnest(dt, !!sym(dt_cols$model_params))
    model_cols <- setdiff(names(dt), original_cols)
  }

  # unnest model column
  dt <- unnest_select_data(dt, select = !!enquo(select), nested_data = !!enquo(model_column),
                     keep_remaining_nested_data = keep_remaining_nested_data,
                     keep_other_list_data = keep_other_list_data)

  # deal with nested scenarios
  if (nested_model && keep_other_list_data) {
    # re-nest model params
    model_cols <- model_cols[model_cols %in% names(dt)]
    dt <- dt %>% mutate(..row_id.. = row_number())
    dt <-
      inner_join(
        dt %>% dplyr::select(!!!map(model_cols, ~quo(-!!sym(.x)))),
        dt %>% dplyr::select(..row_id.., !!!map(model_cols, sym)) %>% nest(!!dt_cols$model_params := c(-..row_id..)),
        by = "..row_id.."
      ) %>%
      dplyr::select(-..row_id..)
  }

  return(dt)
}

# regression aux functions =====

# internal information on supported models
get_supported_models <- function() {
  # nls not supported for now
  tibble::tribble(
    ~.model_func,  ~.model_invertible,
    "lm",         TRUE,
    "glm",        TRUE,
    "lme",        TRUE,
    "loess",      FALSE
  )
}

# get all operations in an expression (in order of appearance)
# this goes through the quo recursively
# @param q quo
# @return text vector
get_call_operations <- function(q) {
  q_ops <- c()
  if (
    (rlang::is_quosure(q) && rlang::quo_is_call(q)) ||
    (!rlang::is_quosure(q) && rlang::is_call(q))) {
    q_ops <- c(rlang::call_name(q), map(rlang::call_args(q), get_call_operations))
  }
  return(unname(unlist(q_ops)))
}
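
# Illustrative example (not run):
# get_call_operations(quo(lm(y ~ x + I(x^2))))
# would return the operations in order of appearance: "lm", "~", "+", "I", "^"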

# get all variables in an expression (in order of appearance)
# this goes through the quo recursively
# @param q quo
# @return text vector
get_call_variables <- function(q) {
  q_syms <- list()
  if (
    (rlang::is_quosure(q) && rlang::quo_is_symbol(q)) ||
    (!rlang::is_quosure(q) && rlang::is_symbol(q))) {
    q_syms <- rlang::as_label(q)
  } else if (
    (rlang::is_quosure(q) && rlang::quo_is_call(q)) ||
    (!rlang::is_quosure(q) && rlang::is_call(q))) {
    q_syms <- map(rlang::call_args(q), get_call_variables)
  }
  return(unique(unlist(q_syms)))
}
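
# Illustrative example (not run):
# get_call_variables(quo(lm(y ~ x + I(x^2))))
# would return the unique variables in order of appearance: "y", "x"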

# get all variables used in a formula (in order of appearance)
# @param formula_q quoted formula expression of form y1 + y2 + ... ~ x1 + x2 + x3
get_formula_variables <- function(formula_q, get_x = TRUE, get_y = TRUE) {

  # safety check
  if (!rlang::quo_is_call(formula_q) || !rlang::call_name(formula_q) == "~")
    stop("not a valid formula of form 'y ~ ...': ", rlang::as_label(formula_q), call. = FALSE)

  # take apart the formula
  left_q <- rlang::call_args(formula_q)[[1]]
  right_q <- rlang::call_args(formula_q)[[2]]
  vars <- c()
  if (get_y) vars <- c(vars, get_call_variables(left_q))
  if (get_x) vars <- c(vars, get_call_variables(right_q))
  return(vars)
}
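
# Illustrative examples (not run):
# get_formula_variables(quo(d13C ~ area + height))                # -> "d13C" "area" "height"
# get_formula_variables(quo(d13C ~ area + height), get_y = FALSE) # -> "area" "height"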

# get model formula variables
# @param model_q quoted model
# @param ... passed on to get_formula_variables
get_model_formula_variables <- function(model_q, ...) {
  args <- rlang::call_args(model_q)
  formula_idx <- which(names(args) == "formula")
  if (length(formula_idx) != 1L) formula_idx <- 1L
  get_formula_variables(quo(!!args[[formula_idx]]), ...)
}

# internal function to parse regression objects for information
parse_regressions <- function(df, reg_col) {
  stopifnot(is.data.frame(df))
  stopifnot(reg_col %in% names(df))
  supported_models <- get_supported_models()
  df %>%
    mutate(.model_func = map_chr(!!sym(reg_col), ~class(.x)[1]) %>% as.character()) %>%
    left_join(supported_models, by = ".model_func") %>%
    mutate(
      .model_supported = !is.na(.model_invertible),
      .model_invertible = .model_supported & .model_invertible,
      .model_vars = map(!!sym(reg_col), ~tibble(var = all.vars(.x$terms), dependent = var %in% all.vars(.x$terms[[2]]))),
      .model_xs = map(.model_vars, ~filter(.x, !dependent)$var),
      .model_nx = map_int(.model_xs, length),
      .model_ys = map(.model_vars, ~filter(.x, dependent)$var),
      .model_ny = map_int(.model_ys, length)
    )
}

# implement a simple glance dispatch for loess models since this is not available from the broom package
# pulls out all the single values and renames s to sigma for consistency with glance.lm
glance.loess <- function(m) {
  smry <- summary(m)
  tibble::as_tibble(smry[map_int(smry, length) == 1L]) %>%
    dplyr::rename(sigma = s)
}

# implement a simple tidy dispatch for loess models since this is not available from the broom package
# coefficients don't make sense for loess models so this returns an empty data frame
tidy.loess <- function(m) {
  tibble(term = character(0), estimate = double(0), std.error = double(0), statistic = double(0), p.value = double(0))
}

# regressions =====

# note - documented but not exported
#' run a set of regressions
#'
#' @param dt data table
#' @param model the regression model or named list of regression models. If a named list is provided, the name(s) will be stored in the \code{model_name} column instead of the formula.
#' @param nest_model whether to nest the model outcome columns (for easier use in multi model systems), default is FALSE
#' @param min_n_datapoints the minimum number of data points required for applying the model(s). Note that there is always an additional check to make sure the minimum number of degrees of freedom for each model is met. If the required degrees of freedom are not met, the model cannot be calculated regardless of what \code{min_n_datapoints} is set to.
#' @param model_data the nested model data column
#' @param model_filter_condition a filter to apply to the data before running the regression (if only a subset of the data is part of the calibration data) - recorded in the \code{in_reg} column; by default no filter is applied
#' @param model_name new column with the model formulae or names if supplied
#' @param model_enough_data new column with information on whether the model has enough data (based on the required degrees of freedom for the model)
#' @param model_data_points new column with information on the number of data points the model is based on
#' @param model_fit the new model objects column
#' @param model_coefs the new model coefficients nested data frame column
#' @param model_summary the new model summary nested data frame column
#' @param model_params the nested model information (only relevant if \code{nest_model = TRUE})
#' @param in_reg name of the new logical column in the nested model_data that flags the data included in the regression (i.e. the data that fulfills the \code{model_filter_condition} if provided)
#' @param residual name of the new residual column in the nested model_data - residuals are only calculated for rows that are part of the regression (as determined by \code{model_filter_condition})
run_regression <- function(dt, model, nest_model = FALSE, min_n_datapoints = 1,
                           model_data = model_data, model_filter_condition = NULL,
                           model_name = model_name, model_enough_data = model_enough_data,
                           model_data_points = model_data_points,
                           model_fit = model_fit, model_coefs = model_coefs,
                           model_summary = model_summary, model_params = model_params,
                           in_reg = in_reg, residual = residual) {

  # safety checks
  if (missing(dt)) stop("no data table supplied", call. = FALSE)
  dt_cols <- get_column_names(dt, model_data = enquo(model_data), type_reqs = list(model_data = "list"))
  dt_new_cols <- get_new_column_names(
    model_name = enquo(model_name),
    model_enough_data = enquo(model_enough_data), model_data_points = enquo(model_data_points),
    model_fit = enquo(model_fit), model_coefs = enquo(model_coefs), model_summary = enquo(model_summary),
    model_params = enquo(model_params),
    in_reg = enquo(in_reg), residual = enquo(residual))
  filter_quo <- enquo(model_filter_condition) %>% { if(quo_is_null(.)) quo(TRUE) else . }

  # models
  if (missing(model)) stop("no regression model supplied", call. = FALSE)
  model_quos <- enquo(model)
  if (rlang::quo_is_call(model_quos) && rlang::call_name(model_quos) == "quos") {
    lquos <- rlang::eval_tidy(model_quos)
  } else {
    # resolve list of models
    if (rlang::quo_is_call(model_quos) && rlang::call_name(model_quos) %in% c("c", "list")) {
      lquos <- quos(!!!rlang::call_args(model_quos))
    } else {
      lquos <- quos(!!model_quos)
    }
  }

  # safety checks on models
  if (length(lquos) == 0) stop("no regression model supplied", call. = FALSE)
  supported_models <- get_supported_models()$.model_func
  lquos_are_models <- map_lgl(lquos, function(lq) rlang::quo_is_call(lq) && rlang::call_name(lq) %in% supported_models)
  lquos_info <-
    ifelse(
      nchar(names(lquos)) > 0,
      sprintf("%s = '%s'", names(lquos), map_chr(lquos, as_label)),
      sprintf("'%s'", map_chr(lquos, as_label))
    )
  if(!all(ok <- lquos_are_models)) {
    if (sum(!ok) > 1)
      glue::glue(
        "{glue::glue_collapse(lquos_info[!ok], sep = \", \", last = \" and \")} ",
        "do not refer to supported models ",
        "({glue::glue_collapse(supported_models, sep = ', ', last = ' or ')})") %>%
      stop(call. = FALSE)
    else
      glue::glue(
        "{glue::glue_collapse(lquos_info[!ok], sep = \", \", last = \" and \")} ",
        "does not refer to a supported model ",
        "({glue::glue_collapse(supported_models, sep = ', ', last = ' or ')})") %>%
      stop(call. = FALSE)
  }

  # safety checks on variables
  model_vars_y <- map(lquos, get_model_formula_variables, get_x = FALSE)
  model_vars_x <- map(lquos, get_model_formula_variables, get_y = FALSE)
  dt_names <- map(dt[[dt_cols$model_data]], names) %>% unlist() %>% unique()
  missing_cols <- map2(model_vars_x, model_vars_y, ~setdiff(c(.x, .y), dt_names))
  all_cols_available <- map_lgl(missing_cols, ~length(.x) == 0)
  if (!all(ok <- all_cols_available)) {
    glue::glue(
      "not all variables exist in the data set:\n - ",
      sprintf("model %s is missing column(s) '%s'", lquos_info[!ok], map_chr(missing_cols[!ok], ~paste(.x, collapse = "', '"))) %>%
        paste(collapse = "\n - ")) %>%
      stop(call. = FALSE)
  }

  # safety check on dependent variables
  model_ny <- map_int(model_vars_y, length)
  if (!all(ok <- model_ny == 1L)) {
    glue("multiple dependent (y) variables are not supported, problematic model(s): ",
         "{paste(lquos_info[!ok], collapse = ', ')}") %>%
      stop(call. = FALSE)
  }

  # models data frame
  models <-
    tibble(
      ..model_id.. = 1:length(lquos),
      model_formula = map_chr(lquos, as_label),
      !!dt_new_cols$model_name := ifelse(nchar(names(lquos)) > 0, names(lquos), model_formula),
      ..model_quo.. = map(lquos, identity),
      ..model_ys.. = model_vars_y
    ) %>%
    # don't keep separate formula column
    select(-model_formula)

  # check for duplicate model names/formulae
  if (any(dups <- duplicated(models[[dt_new_cols$model_name]]))){
    dup_names <- models[[dt_new_cols$model_name]][dups]
    glue("regressions with multiple models require unique model formulae or names (if specified), encountered duplicates: '{collapse(dup_names, \"', '\")}'") %>%
      stop(call. = FALSE)
  }

  # combination of data and model
  data_w_models <-
    dt %>%
    mutate(..group_id.. = row_number()) %>% # for easier sorting
    tidyr::crossing(models) %>%
    # evaluation of model
    mutate(
      # check if there is any data
      ..n_data_points.. = map_int(!!sym(dt_cols$model_data), ~{
        data <- .x
        check_expressions(data, filter_quo)
        nrow(filter(data, !!filter_quo))
      }) %>% unname(),
      !!dt_new_cols$model_enough_data := ..n_data_points.. >= min_n_datapoints,
      !!dt_new_cols$model_data_points := ..n_data_points..,
      # fit the model if there is any data
      !!dt_new_cols$model_fit :=
        pmap(list(m = ..model_quo.., d = !!sym(dt_cols$model_data), run = !!sym(dt_new_cols$model_enough_data)),
             # strip units to avoid issues with non-numeric predictors
             function(m, d, run) if (run) eval_tidy(m, data = filter(iso_strip_units(d), !!filter_quo)) else NULL),
      # figure out which fits actually have enough degrees of freedom
      !!dt_new_cols$model_enough_data :=
        map2_lgl(!!sym(dt_new_cols$model_fit), !!sym(dt_new_cols$model_enough_data),
                 ~if (.y) {
                   !any(coef(.x) %>% as.list() %>% map_lgl(is.na))
                  } else FALSE
        ) %>% unname(),
      # get the coefficients
      !!dt_new_cols$model_coefs := map2(
        !!sym(dt_new_cols$model_fit), !!sym(dt_new_cols$model_enough_data),
        ~if (.y) {
          mutate(as_tibble(tidy(.x)),
                 # add in significant level summary
                 signif = case_when(
                   p.value < 0.001 ~ "*** (p < 0.001)",
                   p.value < 0.01 ~  "**  (p < 0.01)",
                   p.value < 0.05 ~  "*   (p < 0.05)",
                   p.value < 0.1 ~   ".   (p < 0.1)",
                   TRUE ~ "-")
          )
        } else NULL),
      # get the summary
      !!dt_new_cols$model_summary :=
        map2(!!sym(dt_new_cols$model_fit), !!sym(dt_new_cols$model_enough_data),
             # NOTE: broom does not make sure all columns are unnamed which can cause downstream problems, hence the manual unname
             ~if (.y) { as_tibble(glance(.x)) %>% dplyr::mutate_all(unname) } else { NULL })
    )

  # warnings
  if ((not_enough <- sum(!data_w_models[[dt_new_cols$model_enough_data]])) > 0) {

    data_not_enough <- data_w_models %>%
      dplyr::mutate(..row_nr.. = dplyr::row_number()) %>%
      dplyr::filter(!(!!rlang::sym(dt_new_cols$model_enough_data)))

    # get grouping information by using the columns before the nested data
    group_cols <- head(names(dplyr::select(data_not_enough, 1:dt_cols$model_data)), -1)
    grp_labels <- do.call(
      paste,
      args = c(list(sep = ", "), purrr::map(group_cols, ~paste0(.x, "='", data_not_enough[[.x]], "'")))
    )

    glue("{not_enough} of {nrow(data_w_models)} regressions have insufficient ",
         "degrees of freedom (not enough data given the regression models and/or ",
         "requested minimum number of data points). ",
         "Please double check that all peaks are mapped correctly ",
         "(see ?iso_get_problematic_peak_mappings and ",
         "?iso_summarize_peak_mappings) and that the filter condition ",
         "('{as_label(filter_quo)}') is correct.",
         "\n - ",
         # note: difficult to account for groupings here but it's important to provide some information
         paste(
           sprintf("data set %d (%s), model '%s' has only %d calibration data points which is not enough",
                   data_not_enough$..row_nr.., grp_labels,
                   data_not_enough[[dt_new_cols$model_name]],
                   data_not_enough$..n_data_points..
           ),
           collapse = "\n - "
         )) %>%
    warning(immediate. = TRUE, call. = FALSE)
  }

  # add residuals
  data_w_models <-
    data_w_models %>%
    mutate(
      !!dt_cols$model_data :=
        pmap(
          list(d = !!sym(dt_cols$model_data), fit = !!sym(dt_new_cols$model_fit), run = !!sym(dt_new_cols$model_enough_data), y = ..model_ys..),
          function(d, fit, run, y) {
            d[[dt_new_cols$in_reg]] <- FALSE
            d[[dt_new_cols$residual]] <- NA_real_
            if (run) {
              # find which ones are in reg
              d[[dt_new_cols$in_reg]] <- rlang::eval_tidy(filter_quo, data = d)
              # calculate residuals
              resids <- rep(NA_real_, nrow(d))
              resids[d[[dt_new_cols$in_reg]]] <- residuals(fit)
              # process units
              y_units <- iso_get_units(d[[y]])
              if (!is.na(y_units)) {
                resids <- iso_double_with_units(resids, units = y_units)
              }
              d[[dt_new_cols$residual]] <- resids
            }
            return(d)
          })
    ) %>%
    # remove temp variables
    select(-..n_data_points.., -..model_ys..)

  # nest model
  if (nest_model) {
    # generated row id for unique matching id
    data_w_models <- data_w_models %>% mutate(..row_id.. = row_number())
    model_cols <- c(dt_new_cols$model_fit, dt_new_cols$model_coefs, dt_new_cols$model_summary)
    data_w_models <-
      inner_join(
        data_w_models %>% select(!!!map(model_cols, ~quo(-!!sym(.x)))),
        data_w_models %>% select(..row_id.., !!!map(model_cols, sym)) %>%
          nest(!!dt_new_cols$model_params := -..row_id..),
        by = "..row_id.."
      ) %>%
      select(-..row_id..)
  }

  data_w_models %>%
    arrange(..group_id.., ..model_id..) %>%
    select(-..group_id.., -..model_quo.., -..model_id..)
}
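
# Usage sketch (illustrative, not run; the data set and column names are hypothetical):
# calibs <- peak_table %>%
#   nest_data(group_by = compound, nested_data = model_data) %>%
#   run_regression(
#     model = c(
#       delta_only = lm(true_d13C ~ measured_d13C),
#       delta_and_area = lm(true_d13C ~ measured_d13C + area)
#     ),
#     model_filter_condition = type == "standard"
#   )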

# run regressions in grouped blocks (uses nest_data and run_regression)
# @note is this really used or should it be deprecated?
# @probably deprecate
run_grouped_regression <- function(dt, group_by = NULL, model = NULL, model_data = default(model_data), ...) {
  # this one should do the nesting, regression analyses all in once
  if (missing(model)) stop("no model supplied", call. = FALSE)
  model_quo <- enquo(model)
  model_data_quo <- enquo(model_data)
  nest_data(dt, group_by = !!enquo(group_by), nested_data = !!model_data_quo) %>%
    run_regression(model = !!model_quo, model_data = !!model_data_quo, ...)
}

# apply regression =====

# note - documented but not exported
# @FIXME: fix documentation, the inheritance from run_regression is not quite right
# @NOTE: model_name is only included for error reporting purposes - is that silly?
#' apply a regression for calibration purposes
#'
#' this function predicts the dependent variable (y) of a calibration regression directly and can also predict independent variables (x) by regression inversion for single- and multi-variate linear regressions
#'
#' @param dt data table with calibrations
#' @param predict which value to calculate, must be the regression's dependent variable (regression is applied directly) or one of the independent variables (regression will be automatically inverted).
#' @param calculate_error whether to estimate the standard error from the calibration. Stores the result in the new \code{predict_error} column. If the \code{predict} variable is an independent variable, will do so using the Wald method (as described in \link[investr]{invest}). Note that error calculation for independent variables slows this function down a fair bit and is therefore disabled by default.
#' @inheritParams run_regression
#' @inheritParams unnest_model_column
#' @param predict_value the new column in the model_data that holds the predicted value
#' @param predict_error the new column in the model_data that holds the error of the predicted value (always created but \code{NA} if \code{calculate_error = FALSE})
#' @param predict_range vector of 2 numbers. Only relevant for predicting independent variables (regression inversion). If provided, it will be used for finding the solution for the predict variable. By default uses the range observed in the calibration variables. Specifying the \code{predict_range} is usually only necessary if the calibration range must be extrapolated significantly.
apply_regression <- function(dt, predict, nested_model = FALSE, calculate_error = FALSE,
                             model_data = model_data, model_name = model_name,
                             model_fit = model_fit, model_params = model_params,
                             predict_value = pred, predict_error = pred_se,
                             predict_range = NULL) {

  # safety checks
  if (missing(dt)) stop("no data table supplied", call. = FALSE)

  if (nested_model) {
    # nested model
    dt_cols <- get_column_names(
      dt, model_name = enquo(model_name), model_data = enquo(model_data),
      model_params = enquo(model_params),
      type_reqs = list(model_name = "character", model_data = "list", model_params = "list"))

    # check for columns inside nested data
    dt_cols <- c(
      dt_cols,
      get_column_names(unnest(dt, !!sym(dt_cols$model_params)),
                       model_fit = enquo(model_fit), type_reqs = list(model_fit = "list")))

    # pull out the model fit
    dt <- dt %>%
      mutate(
        !!dt_cols$model_fit := map(!!sym(dt_cols$model_params), ~.x[[dt_cols$model_fit]])
      ) %>%
      unnest(!!sym(dt_cols$model_fit))
  } else {
    # not nested model
    dt_cols <- get_column_names(
      dt, model_data = enquo(model_data),
      model_name = enquo(model_name), model_fit = enquo(model_fit),
      type_reqs = list(model_name = "character", model_data = "list", model_fit = "list"))
  }

  # new cols (+predict)
  dt_new_cols <- get_new_column_names(
    predict = enquo(predict), predict_value = enquo(predict_value),
    predict_error = enquo(predict_error))

  # figure out all the necessary information for applying the regression
  supported_models <- get_supported_models()
  dt <-
    dt %>%
    # general information about the model parameters
    parse_regressions(reg_col = dt_cols$model_fit) %>%
    mutate(
      # information about the prediction variable
      .predict_var = dt_new_cols$predict,
      .predict_info = map(.model_vars, ~filter(.x, var == dt_new_cols$predict)),
      .predict_var_ok = map_lgl(.predict_info, ~nrow(.x) == 1),
      .predict_dependent = map_lgl(.predict_info, ~identical(.x$dependent, TRUE)),
      # y variable of the model (only 1, this is checked during run_regression)
      .predict_y = .model_ys,
      # x variables (including the predict_var if it's an x value)
      .predict_xs = map(.model_vars, ~filter(.x, var != dt_new_cols$predict, !dependent)$var)
    )

  # safety checks
  if (!all(dt$.predict_var_ok)) {
    glue("cannot apply regression - variable '{dt_new_cols$predict}' is not a variable in the following model(s): ",
         "{collapse(filter(dt, !.predict_var_ok)[[dt_cols$model_name]] %>% unique(), sep = ', ')}") %>%
      stop(call. = FALSE)
  }
  if (!all(dt$.predict_dependent | dt$.model_invertible)) {
    glue("encountered regression model type(s) that cannot be inverted (only dependent variables can be predicted for these): ",
         "{collapse(filter(dt, !.predict_dependent & !.model_invertible)$.model_func %>% unique(), sep = ', ')}") %>%
      stop(call. = FALSE)
  }

  # inversion functions
  safe_invest <- safely(invest)
  invest_interval_method <- if (calculate_error) "Wald" else "none"

  # apply regression
  dt <-
    dt %>%
    mutate(
      !!dt_cols$model_data :=
        pmap(
          list(d = !!sym(dt_cols$model_data), fit = !!sym(dt_cols$model_fit), name = !!sym(dt_cols$model_name),
               var = .predict_var, dependent = .predict_dependent,
               y = .predict_y, xs = .predict_xs),

          # process data set for a single model
          function(d, fit, name, var, dependent, y, xs) {

            # units of predict var
            var_units <- iso_get_units(d[[var]])

            # check for enough data (standard eval to avoid quoting trouble)
            d_enough_data <-
              if (dependent) rowSums(is.na(d[c(y, xs)]) * 1) == 0
              else rowSums(is.na(d[xs]) * 1) == 0

            # data with rows
            d <- d %>%
              mutate(
                ..enough_data.. = d_enough_data,
                ..rn.. = 1:length(d_enough_data)
              )

            if (dependent) {
              # normal predict =====
              # predict dependent variable (y) by normal regression predict
              new_data <- d %>% select(xs) %>% iso_strip_units()
              pred <- stats::predict(fit, newdata = new_data, se = calculate_error)
              d_prediction <-
                tibble(
                  ..rn.. = d$..rn..,
                  ..estimate.. = if (calculate_error) unname(pred$fit) else unname(pred),
                  ..se.. = if (calculate_error) unname(pred$se.fit) else NA_real_,
                  ..problem.. = NA_character_
                )
            } else {
              # inversion predict ======
              # predict an independent variable by inversion

              # range
              if(!is.null(predict_range)) {
                # parameter provided predict_range
                range <- predict_range
              } else {
                # estimate predict range based on available values
                # NOTE: should this be only values that were used for calib? (i.e. have residual column set)
                range <- base::range(as.numeric(d[[var]]), na.rm = TRUE)
              }
              range_tolerance_escalation <- c(0, 1, 10, 100, 1000)

              # do the calculation for each row
              d_prediction <- d %>%
                # strip units to avoid issues with non-numeric predictors
                iso_strip_units() %>%
                # NOTE: use group by and do to get a more informative progress bar in interactive use
                group_by(..rn..) %>%
                do({
                  # values
                  estimate <- NA_real_
                  se <- NA_real_
                  problem <- NA_character_

                  # check whether have enough data
                  if (.$..enough_data..) {

                    # cycle through range tolerance
                    for(range_tolerance in range_tolerance_escalation) {
                      # try to find fit
                      out <- safe_invest(
                        fit, y0 = .[[y]], x0.name = var, data = ., newdata = .[xs],
                        interval = invest_interval_method,
                        lower = range[1] - range_tolerance * diff(range),
                        upper = range[2] + range_tolerance * diff(range),
                        # NOTE: the following doesn't really do anything, using the tolerance ranges instead
                        extendInt = "yes"
                      )

                      # break as soon as success or a different error
                      if (is.null(out$error) || !str_detect(out$error$message, "not found in.*search interval")) {
                        break
                      }
                    }

                    # process outcome
                    if (is.null(out$error) && calculate_error) {
                      # with error estimates
                      estimate <- unname(out$result$estimate)
                      se <- unname(out$result$se)
                    } else if (is.null(out$error)) {
                      # no error estimates
                      estimate <- unname(out$result)
                    } else if (str_detect(out$error$message, "not found in the search interval")) {
                      problem <- glue(
                        "No solution for '{var}' in the interval {range[1] - range_tolerance * diff(range)} ",
                        "to {range[2] + range_tolerance * diff(range)}, potential fit is too far outside ",
                        "the calibration range",
                        "- consider manually adjusting parameter 'predict_range'.") %>%
                        as.character()
                    } else {
                      problem <- out$error$message
                    }
                  } else {
                    # not enough data
                    problem <- glue("Not enough data, missing a value for at least one of the variables: ",
                                    "'{collapse(c(y, xs), sep = \"', '\", last = \"' or '\")}'") %>%
                      as.character()
                  }

                  # return data
                  tibble(
                    ..estimate.. = estimate,
                    ..se.. = se,
                    ..problem.. = problem
                  )
                }) %>% ungroup()
            }

            # units
            if (!is.na(var_units)) {
              d_prediction <-
                d_prediction %>%
                mutate(
                  ..estimate.. = iso_double_with_units(..estimate.., units = var_units),
                  ..se.. = iso_double_with_units(..se.., units = var_units)
                )
            }

            # warnings about problematic sets
            if (nrow(d_problematic <- filter(d_prediction, !is.na(..problem..))) > 0) {
              problems <- d_problematic %>%
                group_by(..problem..) %>%
                summarize(n = length(..rn..), rows = collapse(..rn.., sep = ", ", last = " and ")) %>%
                mutate(message = glue("{n} data rows ({rows}) failed with the following error/warning: {..problem..}"))
              glue("failed to calculate '{var}' with regression model '{name}' for {nrow( d_problematic)}/{nrow(d_prediction)} data entries:\n",
                   " - {collapse(problems$message, sep = '\n - ')}") %>%
                warning(immediate. = TRUE, call. = FALSE)

            }

            # return combined
            left_join(d, d_prediction, by = "..rn..") %>%
              select(-..rn.., -..enough_data.., -..problem..) %>%
              rename(
                !!dt_new_cols$predict_value := ..estimate..,
                !!dt_new_cols$predict_error := ..se..
              )
          })
    )

  # cleanup
  dt <- dt %>%
    # remove helper columns
    select(-starts_with(".predict"), -starts_with(".model"))

  # remove unnested columns if in nested mode
  if (nested_model) {
    dt <- dt %>% select(-!!sym(dt_cols$model_fit))
  }

  return(dt)
}
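
# Usage sketch (illustrative, not run; continues the hypothetical 'calibs' example from
# run_regression): predict the dependent variable directly ...
# calibs %>% apply_regression(predict = true_d13C, calculate_error = TRUE)
# ... or invert the regression to predict an independent variable
# calibs %>% apply_regression(predict = measured_d13C)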

# regression range =====

#' evaluate regression ranges
#' @inheritParams run_regression
#' @inheritParams apply_regression
#' @param ... which terms to evaluate the range for. Can be individual columns or more complex numeric expressions. All must be valid within the scope of the model_data and only numeric values are currently evaluated.
#' @param model_range new column recording the range of all terms for each model and model_data combination.
#' @param in_range new column in the model_data that holds whether a data entry is within the model_range. Checks whether all terms are within the model_range and records a textual summary of the result in this new column.
evaluate_range <- function(
  dt, ..., nested_model = FALSE, model_data = model_data, model_params = model_params,
  in_reg = in_reg, model_range = model_range, in_range = in_range) {

  # safety checks
  if (missing(dt)) stop("no data table supplied", call. = FALSE)

  # terms
  terms_quos <- rlang::enquos(...)
  if (length(terms_quos) == 0) {
    stop("no terms for range evaluation are provided, please specify at least one term", call. = FALSE)
  }

  # dt columns
  dt_cols <- get_column_names(
    dt, model_data = enquo(model_data), type_reqs = list(model_data = "list"))

  # nested model
  if (nested_model) {
    dt_cols <- c(
      dt_cols,
      get_column_names(
        dt, model_params = enquo(model_params), type_reqs = list(model_params = "list"))
    )
  }

  # terms tibble
  terms <-
    tibble(
      term = map_chr(terms_quos, rlang::as_label) %>% unname(),
      q = terms_quos
    )

  # data columns
  in_reg_quo <- enquo(in_reg)
  dt <- dt %>%
    mutate(
      ..data_cols.. = map(!!sym(dt_cols$model_data), ~
          {
            model_data <- .x
            get_column_names(model_data, in_reg = in_reg_quo,
                             type_reqs = list(in_reg = "logical"))
          })
    )

  # new columns
  dt_new_cols <- get_new_column_names(model_range = enquo(model_range), in_range = enquo(in_range))

  # find range
  dt_w_ranges <-
    dt %>%
    mutate(
      !!dt_new_cols$model_range := purrr::map2(
        !!sym(dt_cols$model_data),
        ..data_cols..,
        ~{
          # consider only data that is part of the regression
          d_in_calib <- .x[.x[[.y$in_reg]],]
          if (nrow(d_in_calib) > 0) {
            # determine the ranges for all terms
            tryCatch(
              terms_ranges <-
                terms %>%
                mutate(
                  values = map(q, ~rlang::eval_tidy(.x, data = d_in_calib)),
                  units = map_chr(values, iso_get_units) %>% unname(),
                  min = map_dbl(
                    values, ~.x %>%
                      { if(is.numeric(.)) { as.numeric(min(., na.rm = TRUE)) } else { NA_real_} }) %>%
                    unname(),
                  max = map_dbl(
                    values, ~.x %>%
                      { if(is.numeric(.)) { as.numeric(max(., na.rm = TRUE)) } else { NA_real_} }) %>%
                    unname()
                ) %>%
                select(-q, -values),
              error = function(e) {
                glue::glue(
                  "not all range evaluation terms are valid expressions in the context of the ",
                  "model data: {e$message}") %>%
                  stop(call. = FALSE)
              }
            )
            return(terms_ranges)
          } else {
            return(mutate(terms, units = NA_character_, min = NA_real_, max = NA_real_) %>% select(-q))
          }
        }
      )
    )

  # evaluate range
  dt_w_ranges <- dt_w_ranges %>%
    mutate(
      !!dt_cols$model_data :=
        map2(!!sym(dt_cols$model_data), !!sym(dt_new_cols$model_range), ~{
          model_data <- mutate(.x, ..data_id.. = row_number())

          # evaluate ranges
          terms_evals <- terms %>%
            # find data values for each term
            mutate(..values.. = map(q, ~{
              mutate(
                model_data,
                value = rlang::eval_tidy(.x, data = model_data) %>% as.numeric()
              ) %>% select(..data_id.., value)
            }) %>% unname()
            ) %>%
            # remove quos
            select(-q) %>%
            # figure out if it's within or outside the range
            tidyr::unnest(cols = ..values..) %>%
            left_join(.y, by = "term") %>%
            mutate(
              in_range = case_when(
                is.na(min) | is.na(max) ~ sprintf("'%s' range NA", term),
                is.na(value) ~ sprintf("'%s' value NA", term),
                value < min ~ sprintf("<'%s' range", term),
                value > max ~ sprintf(">'%s' range", term),
                TRUE ~ "OK"
              )
            ) %>%
            group_by(..data_id..) %>%
            summarize(
              !!dt_new_cols$in_range :=
                if (all(in_range == "OK")) "in range"
                else paste(unique(in_range[in_range != "OK"]), collapse = ", ")
            )

          # add in the range evaluation
          model_data %>%
            {
              if (dt_new_cols$in_range %in% names(.)) select(., -!!sym(dt_new_cols$in_range))
              else .
            } %>%
            left_join(select(terms_evals, ..data_id.., !!sym(dt_new_cols$in_range)), by = "..data_id..") %>%
            select(-..data_id..)
        })
    )

  # remove unnested columns if in nested mode
  if (nested_model) {
    dt_w_ranges <- dt_w_ranges %>%
      # nest the new model range column into the nested columns
      mutate(!!dt_cols$model_params := map2(!!sym(dt_cols$model_params), !!sym(dt_new_cols$model_range),
                                            ~{.x[[dt_new_cols$model_range]] <- list(.y); .x})) %>%
      # remove the not nested columns
      select(-!!sym(dt_new_cols$model_range))
  }

  return(dt_w_ranges %>% select(-..data_cols..))
}
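
# Usage sketch (illustrative, not run; continues the hypothetical 'calibs' example):
# record the calibration range of 'area' and 'measured_d13C' for each model and flag in
# the new 'in_range' column which data entries fall outside it
# calibs %>% evaluate_range(area, measured_d13C)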