# This file is auto-generated by h2o-3/h2o-bindings/bin/gen_R.py
# Copyright 2016 H2O.ai;  Apache License Version 2.0 (see LICENSE for details) 
#'
# -------------------------- Uplift Random Forest Model in H2O -------------------------- #
#'
#' Build a Uplift Random Forest model
#' 
#' Builds a Uplift Random Forest model on an H2OFrame.
#'
#' @param x (Optional) A vector containing the names or indices of the predictor variables to use in building the model.
#'        If x is missing, then all columns except y are used.
#' @param y The name or column index of the response variable in the data. 
#'        The response must be either a numeric or a categorical/factor variable. 
#'        If the response is numeric, then a regression model will be trained, otherwise it will train a classification model.
#' @param training_frame Id of the training data frame.
#' @param treatment_column Defines the column used to compute uplift gain when selecting the best split for a tree. The column
#'        has to divide the dataset into treatment (value 1) and control (value 0) groups. Defaults to treatment.
#' @param model_id Destination id for this model; auto-generated if not specified.
#' @param validation_frame Id of the validation data frame.
#' @param score_each_iteration \code{Logical}. Whether to score during each iteration of model training. Defaults to FALSE.
#' @param score_tree_interval Score the model after every so many trees. Disabled if set to 0. Defaults to 0.
#' @param ignore_const_cols \code{Logical}. Ignore constant columns. Defaults to TRUE.
#' @param ntrees Number of trees. Defaults to 50.
#' @param max_depth Maximum tree depth (0 for unlimited). Defaults to 20.
#' @param min_rows Fewest allowed (weighted) observations in a leaf. Defaults to 1.
#' @param nbins For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the best point.
#'        Defaults to 20.
#' @param nbins_top_level For numerical columns (real/int), build a histogram of (at most) this many bins at the root level, then
#'        decrease by a factor of two per level. Defaults to 1024.
#' @param nbins_cats For categorical columns (factors), build a histogram of this many bins, then split at the best point. Higher
#'        values can lead to more overfitting. Defaults to 1024.
#' @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable. Defaults to 0.
#' @param seed Seed for random numbers (affects certain parts of the algo that are stochastic and those might or might not be enabled by default).
#'        Defaults to -1 (time-based random number).
#' @param mtries Number of variables randomly sampled as candidates at each split. If set to -1, defaults to sqrt(p) for
#'        classification and p/3 for regression (where p is the number of predictors). Defaults to -2.
#' @param sample_rate Row sample rate per tree (from 0.0 to 1.0). Defaults to 0.632.
#' @param sample_rate_per_class A list of row sample rates per class (relative fraction for each class, from 0.0 to 1.0), for each tree.
#' @param col_sample_rate_change_per_level Relative change of the column sampling rate for every level (must be > 0.0 and <= 2.0). Defaults to 1.
#' @param col_sample_rate_per_tree Column sample rate per tree (from 0.0 to 1.0). Defaults to 1.
#' @param histogram_type What type of histogram to use for finding optimal split points. Must be one of: "AUTO", "UniformAdaptive",
#'        "Random", "QuantilesGlobal", "RoundRobin", "UniformRobust". Defaults to AUTO.
#' @param categorical_encoding Encoding scheme for categorical features. Must be one of: "AUTO", "Enum", "OneHotInternal", "OneHotExplicit",
#'        "Binary", "Eigen", "LabelEncoder", "SortByResponse", "EnumLimited". Defaults to AUTO.
#' @param distribution Distribution function. Must be one of: "AUTO", "bernoulli", "multinomial", "gaussian", "poisson", "gamma",
#'        "tweedie", "laplace", "quantile", "huber". Defaults to AUTO. Note that only "bernoulli" is supported for Uplift Random Forest.
#' @param check_constant_response \code{Logical}. Check if the response column is constant. If enabled, an exception is thrown if the
#'        response column is a constant value. If disabled, the model will train regardless of whether the response column
#'        is constant. Defaults to TRUE.
#' @param uplift_metric Divergence metric used to find best split when building an uplift tree. Must be one of: "AUTO", "KL",
#'        "Euclidean", "ChiSquared". Defaults to AUTO.
#' @param auuc_type Metric used to calculate Area Under Uplift Curve. Must be one of: "AUTO", "qini", "lift", "gain". Defaults to
#'        AUTO.
#' @param auuc_nbins Number of bins to calculate Area Under Uplift Curve. Defaults to -1.
#' @param verbose \code{Logical}. Print scoring history to the console (Metrics per tree). Defaults to FALSE.
#' @return Creates a \linkS4class{H2OModel} object of the right type.
#' @seealso \code{\link{predict.H2OModel}} for prediction
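#' @examples
#' \dontrun{
#' # A minimal usage sketch. The file path, predictor names ("f1", "f2"), response
#' # column ("conversion"), and treatment column ("treatment") are placeholders;
#' # substitute the columns of your own frame.
#' library(h2o)
#' h2o.init()
#' train <- h2o.importFile("path/to/uplift_train.csv")
#' # The treatment column must be a two-level factor dividing the data into
#' # control (0) and treatment (1) groups; the response must be categorical.
#' train$treatment <- as.factor(train$treatment)
#' train$conversion <- as.factor(train$conversion)
#' uplift_model <- h2o.upliftRandomForest(x = c("f1", "f2"),
#'                                        y = "conversion",
#'                                        training_frame = train,
#'                                        treatment_column = "treatment",
#'                                        ntrees = 50,
#'                                        uplift_metric = "KL",
#'                                        auuc_type = "qini")
#' predict(uplift_model, train)
#' }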
#' @export
h2o.upliftRandomForest <- function(x,
                                   y,
                                   training_frame,
                                   treatment_column,
                                   model_id = NULL,
                                   validation_frame = NULL,
                                   score_each_iteration = FALSE,
                                   score_tree_interval = 0,
                                   ignore_const_cols = TRUE,
                                   ntrees = 50,
                                   max_depth = 20,
                                   min_rows = 1,
                                   nbins = 20,
                                   nbins_top_level = 1024,
                                   nbins_cats = 1024,
                                   max_runtime_secs = 0,
                                   seed = -1,
                                   mtries = -2,
                                   sample_rate = 0.632,
                                   sample_rate_per_class = NULL,
                                   col_sample_rate_change_per_level = 1,
                                   col_sample_rate_per_tree = 1,
                                   histogram_type = c("AUTO", "UniformAdaptive", "Random", "QuantilesGlobal", "RoundRobin", "UniformRobust"),
                                   categorical_encoding = c("AUTO", "Enum", "OneHotInternal", "OneHotExplicit", "Binary", "Eigen", "LabelEncoder", "SortByResponse", "EnumLimited"),
                                   distribution = c("AUTO", "bernoulli", "multinomial", "gaussian", "poisson", "gamma", "tweedie", "laplace", "quantile", "huber"),
                                   check_constant_response = TRUE,
                                   uplift_metric = c("AUTO", "KL", "Euclidean", "ChiSquared"),
                                   auuc_type = c("AUTO", "qini", "lift", "gain"),
                                   auuc_nbins = -1,
                                   verbose = FALSE)
{
  # Validate required training_frame first and other frame args: should be a valid key or an H2OFrame object
  training_frame <- .validate.H2OFrame(training_frame, required=TRUE)
  validation_frame <- .validate.H2OFrame(validation_frame, required=FALSE)

  # Validate other required args
  # If x is missing, then assume user wants to use all columns as features.
  if (missing(x)) {
     if (is.numeric(y)) {
         x <- setdiff(col(training_frame), y)
     } else {
         x <- setdiff(colnames(training_frame), y)
     }
  }

  # Build parameter list to send to model builder
  parms <- list()
  parms$training_frame <- training_frame
  args <- .verify_dataxy(training_frame, x, y)
  if (!missing(treatment_column)) {
    parms$treatment_column <- treatment_column
  } else {
    stop("Treatment column is required.")  
  }
  parms$ignored_columns <- args$x_ignore
  parms$response_column <- args$y

  if (!missing(model_id))
    parms$model_id <- model_id
  if (!missing(validation_frame))
    parms$validation_frame <- validation_frame
  if (!missing(score_each_iteration))
    parms$score_each_iteration <- score_each_iteration
  if (!missing(score_tree_interval))
    parms$score_tree_interval <- score_tree_interval
  if (!missing(ignore_const_cols))
    parms$ignore_const_cols <- ignore_const_cols
  if (!missing(ntrees))
    parms$ntrees <- ntrees
  if (!missing(max_depth))
    parms$max_depth <- max_depth
  if (!missing(min_rows))
    parms$min_rows <- min_rows
  if (!missing(nbins))
    parms$nbins <- nbins
  if (!missing(nbins_top_level))
    parms$nbins_top_level <- nbins_top_level
  if (!missing(nbins_cats))
    parms$nbins_cats <- nbins_cats
  if (!missing(max_runtime_secs))
    parms$max_runtime_secs <- max_runtime_secs
  if (!missing(seed))
    parms$seed <- seed
  if (!missing(mtries))
    parms$mtries <- mtries
  if (!missing(sample_rate))
    parms$sample_rate <- sample_rate
  if (!missing(sample_rate_per_class))
    parms$sample_rate_per_class <- sample_rate_per_class
  if (!missing(col_sample_rate_change_per_level))
    parms$col_sample_rate_change_per_level <- col_sample_rate_change_per_level
  if (!missing(col_sample_rate_per_tree))
    parms$col_sample_rate_per_tree <- col_sample_rate_per_tree
  if (!missing(histogram_type))
    parms$histogram_type <- histogram_type
  if (!missing(categorical_encoding))
    parms$categorical_encoding <- categorical_encoding
  if (!missing(check_constant_response))
    parms$check_constant_response <- check_constant_response
  if (!missing(uplift_metric))
    parms$uplift_metric <- uplift_metric
  if (!missing(auuc_type))
    parms$auuc_type <- auuc_type
  if (!missing(auuc_nbins))
    parms$auuc_nbins <- auuc_nbins

  if (!missing(distribution)) {
    warning("The only bernoulli distribution is supported for Uplift Random Forest.")
    parms$distribution <- 'bernoulli'
  }

  # Error check and build model
  model <- .h2o.modelJob('upliftdrf', parms, h2oRestApiVersion=3, verbose=verbose)
  return(model)
}
.h2o.train_segments_upliftrandomforest <- function(x,
                                                   y,
                                                   training_frame,
                                                   treatment_column,
                                                   validation_frame = NULL,
                                                   score_each_iteration = FALSE,
                                                   score_tree_interval = 0,
                                                   ignore_const_cols = TRUE,
                                                   ntrees = 50,
                                                   max_depth = 20,
                                                   min_rows = 1,
                                                   nbins = 20,
                                                   nbins_top_level = 1024,
                                                   nbins_cats = 1024,
                                                   max_runtime_secs = 0,
                                                   seed = -1,
                                                   mtries = -2,
                                                   sample_rate = 0.632,
                                                   sample_rate_per_class = NULL,
                                                   col_sample_rate_change_per_level = 1,
                                                   col_sample_rate_per_tree = 1,
                                                   histogram_type = c("AUTO", "UniformAdaptive", "Random", "QuantilesGlobal", "RoundRobin", "UniformRobust"),
                                                   categorical_encoding = c("AUTO", "Enum", "OneHotInternal", "OneHotExplicit", "Binary", "Eigen", "LabelEncoder", "SortByResponse", "EnumLimited"),
                                                   distribution = c("AUTO", "bernoulli", "multinomial", "gaussian", "poisson", "gamma", "tweedie", "laplace", "quantile", "huber"),
                                                   check_constant_response = TRUE,
                                                   uplift_metric = c("AUTO", "KL", "Euclidean", "ChiSquared"),
                                                   auuc_type = c("AUTO", "qini", "lift", "gain"),
                                                   auuc_nbins = -1,
                                                   segment_columns = NULL,
                                                   segment_models_id = NULL,
                                                   parallelism = 1)
{
  # formally define variables that were excluded from function parameters
  model_id <- NULL
  verbose <- NULL
  destination_key <- NULL
  # Validate required training_frame first and other frame args: should be a valid key or an H2OFrame object
  training_frame <- .validate.H2OFrame(training_frame, required=TRUE)
  validation_frame <- .validate.H2OFrame(validation_frame, required=FALSE)

  # Validate other required args
  # If x is missing, then assume user wants to use all columns as features.
  if (missing(x)) {
     if (is.numeric(y)) {
         x <- setdiff(col(training_frame), y)
     } else {
         x <- setdiff(colnames(training_frame), y)
     }
  }

  # Build parameter list to send to model builder
  parms <- list()
  parms$training_frame <- training_frame
  args <- .verify_dataxy(training_frame, x, y)
  if (!missing(treatment_column)) {
    parms$treatment_column <- treatment_column
  } else {
    stop("Treatment column is required.")  
  }
  parms$ignored_columns <- args$x_ignore
  parms$response_column <- args$y

  if (!missing(validation_frame))
    parms$validation_frame <- validation_frame
  if (!missing(score_each_iteration))
    parms$score_each_iteration <- score_each_iteration
  if (!missing(score_tree_interval))
    parms$score_tree_interval <- score_tree_interval
  if (!missing(ignore_const_cols))
    parms$ignore_const_cols <- ignore_const_cols
  if (!missing(ntrees))
    parms$ntrees <- ntrees
  if (!missing(max_depth))
    parms$max_depth <- max_depth
  if (!missing(min_rows))
    parms$min_rows <- min_rows
  if (!missing(nbins))
    parms$nbins <- nbins
  if (!missing(nbins_top_level))
    parms$nbins_top_level <- nbins_top_level
  if (!missing(nbins_cats))
    parms$nbins_cats <- nbins_cats
  if (!missing(max_runtime_secs))
    parms$max_runtime_secs <- max_runtime_secs
  if (!missing(seed))
    parms$seed <- seed
  if (!missing(mtries))
    parms$mtries <- mtries
  if (!missing(sample_rate))
    parms$sample_rate <- sample_rate
  if (!missing(sample_rate_per_class))
    parms$sample_rate_per_class <- sample_rate_per_class
  if (!missing(col_sample_rate_change_per_level))
    parms$col_sample_rate_change_per_level <- col_sample_rate_change_per_level
  if (!missing(col_sample_rate_per_tree))
    parms$col_sample_rate_per_tree <- col_sample_rate_per_tree
  if (!missing(histogram_type))
    parms$histogram_type <- histogram_type
  if (!missing(categorical_encoding))
    parms$categorical_encoding <- categorical_encoding
  if (!missing(check_constant_response))
    parms$check_constant_response <- check_constant_response
  if (!missing(uplift_metric))
    parms$uplift_metric <- uplift_metric
  if (!missing(auuc_type))
    parms$auuc_type <- auuc_type
  if (!missing(auuc_nbins))
    parms$auuc_nbins <- auuc_nbins

  if (!missing(distribution)) {
    warning("The only bernoulli distribution is supported for Uplift Random Forest.")
    parms$distribution <- 'bernoulli'
  }

  # Build segment-models specific parameters
  segment_parms <- list()
  if (!missing(segment_columns))
    segment_parms$segment_columns <- segment_columns
  if (!missing(segment_models_id))
    segment_parms$segment_models_id <- segment_models_id
  segment_parms$parallelism <- parallelism

  # Error check and build segment models
  segment_models <- .h2o.segmentModelsJob('upliftdrf', segment_parms, parms, h2oRestApiVersion=3)
  return(segment_models)
}
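
# .h2o.train_segments_upliftrandomforest is an internal helper and is not meant to be
# called directly. As a hedged sketch (assuming the public h2o.train_segments() wrapper
# dispatches to it and accepts the algorithm name below; the frame and column names are
# placeholders), segmented uplift models would be built along these lines:
#
#   segment_models <- h2o.train_segments(algorithm = "upliftRandomForest",
#                                        segment_columns = "region",
#                                        x = c("f1", "f2"),
#                                        y = "conversion",
#                                        training_frame = train,
#                                        treatment_column = "treatment",
#                                        parallelism = 2)
#   as.data.frame(segment_models)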
