
# This file is auto-generated by h2o-3/h2o-bindings/bin/gen_R.py
# Copyright 2016 H2O.ai;  Apache License Version 2.0 (see LICENSE for details) 
#'
# -------------------------- Random Forest Model in H2O -------------------------- #
#'
#' Build a Random Forest model
#' 
#' Builds a Random Forest model on an H2OFrame.
#'
#' @param x (Optional) A vector containing the names or indices of the predictor variables to use in building the model.
#'        If x is missing, then all columns except y are used.
#' @param y The name or column index of the response variable in the data.
#'        The response must be either a numeric or a categorical/factor variable.
#'        If the response is numeric, a regression model will be trained; otherwise, a classification model will be trained.
#' @param training_frame Id of the training data frame.
#' @param model_id Destination id for this model; auto-generated if not specified.
#' @param validation_frame Id of the validation data frame.
#' @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2). Defaults to 0.
#' @param keep_cross_validation_models \code{Logical}. Whether to keep the cross-validation models. Defaults to TRUE.
#' @param keep_cross_validation_predictions \code{Logical}. Whether to keep the predictions of the cross-validation models. Defaults to FALSE.
#' @param keep_cross_validation_fold_assignment \code{Logical}. Whether to keep the cross-validation fold assignment. Defaults to FALSE.
#' @param score_each_iteration \code{Logical}. Whether to score during each iteration of model training. Defaults to FALSE.
#' @param score_tree_interval Score the model after every so many trees. Disabled if set to 0. Defaults to 0.
#' @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified' option will
#'        stratify the folds based on the response variable, for classification problems. Must be one of: "AUTO",
#'        "Random", "Modulo", "Stratified". Defaults to AUTO.
#' @param fold_column Column with cross-validation fold index assignment per observation.
#' @param ignore_const_cols \code{Logical}. Ignore constant columns. Defaults to TRUE.
#' @param offset_column Offset column. This argument is deprecated and has no use for Random Forest.
#' @param weights_column Column with observation weights. Giving an observation a weight of zero is equivalent to excluding it from
#'        the dataset; giving an observation a relative weight of 2 is equivalent to repeating that row twice. Negative
#'        weights are not allowed. Note: Weights are per-row observation weights and do not increase the size of the
#'        data frame. This is typically the number of times a row is repeated, but non-integer values are supported as
#'        well. During training, rows with higher weights matter more, due to the larger loss function pre-factor. If
#'        you set weight = 0 for a row, the returned prediction for that row is zero, which is incorrect; to get
#'        accurate predictions, remove all rows with weight == 0.
#' @param balance_classes \code{Logical}. Balance training data class counts via over/under-sampling (for imbalanced data). Defaults to
#'        FALSE.
#' @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not specified, sampling factors will
#'        be automatically computed to obtain class balance during training. Requires balance_classes.
#' @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be less than 1.0). Requires
#'        balance_classes. Defaults to 5.0.
#' @param ntrees Number of trees. Defaults to 50.
#' @param max_depth Maximum tree depth (0 for unlimited). Defaults to 20.
#' @param min_rows Fewest allowed (weighted) observations in a leaf. Defaults to 1.
#' @param nbins For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the best point.
#'        Defaults to 20.
#' @param nbins_top_level For numerical columns (real/int), build a histogram of (at most) this many bins at the root level, then
#'        decrease by a factor of two per level. Defaults to 1024.
#' @param nbins_cats For categorical columns (factors), build a histogram of this many bins, then split at the best point. Higher
#'        values can lead to more overfitting. Defaults to 1024.
#' @param r2_stopping r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds, stopping_metric
#'        and stopping_tolerance instead. Previous versions of H2O would stop making trees when the R^2 metric equaled or
#'        exceeded this value. Defaults to 1.797693135e+308.
#' @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if the simple moving average of length k of the
#'        stopping_metric does not improve for k:=stopping_rounds scoring events (0 to disable). Defaults to 0.
#' @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for regression and anomaly_score
#'        for Isolation Forest). Note that custom and custom_increasing can only be used in GBM and DRF with the Python
#'        client. Must be one of: "AUTO", "deviance", "logloss", "MSE", "RMSE", "MAE", "RMSLE", "AUC", "AUCPR",
#'        "lift_top_group", "misclassification", "mean_per_class_error", "custom", "custom_increasing". Defaults to
#'        AUTO.
#' @param stopping_tolerance Relative tolerance for the metric-based stopping criterion (stop if relative improvement is not at least this
#'        much). Defaults to 0.001.
#' @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable. Defaults to 0.
#' @param seed Seed for random numbers (affects certain parts of the algo that are stochastic and those might or might not be enabled by default).
#'        Defaults to -1 (time-based random number).
#' @param build_tree_one_node \code{Logical}. Run on one node only; no network overhead but fewer cpus used. Suitable for small datasets.
#'        Defaults to FALSE.
#' @param mtries Number of variables randomly sampled as candidates at each split. If set to -1, defaults to sqrt(p) for
#'        classification and p/3 for regression (where p is the number of predictors). Defaults to -1.
#' @param sample_rate Row sample rate per tree (from 0.0 to 1.0). Defaults to 0.632.
#' @param sample_rate_per_class A list of row sample rates per class (relative fraction for each class, from 0.0 to 1.0), for each tree.
#' @param binomial_double_trees \code{Logical}. For binary classification: Build 2x as many trees (one per class) - can lead to higher
#'        accuracy. Defaults to FALSE.
#' @param checkpoint Model checkpoint to resume training with.
#' @param col_sample_rate_change_per_level Relative change of the column sampling rate for every level (must be > 0.0 and <= 2.0). Defaults to 1.
#' @param col_sample_rate_per_tree Column sample rate per tree (from 0.0 to 1.0). Defaults to 1.
#' @param min_split_improvement Minimum relative improvement in squared error reduction for a split to happen. Defaults to 1e-05.
#' @param histogram_type What type of histogram to use for finding optimal split points. Must be one of: "AUTO", "UniformAdaptive",
#'        "Random", "QuantilesGlobal", "RoundRobin", "UniformRobust". Defaults to AUTO.
#' @param categorical_encoding Encoding scheme for categorical features. Must be one of: "AUTO", "Enum", "OneHotInternal", "OneHotExplicit",
#'        "Binary", "Eigen", "LabelEncoder", "SortByResponse", "EnumLimited". Defaults to AUTO.
#' @param calibrate_model \code{Logical}. Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
#'        probabilities. Calibration can provide more accurate estimates of class probabilities. Defaults to FALSE.
#' @param calibration_frame Data for model calibration.
#' @param calibration_method Calibration method to use. Must be one of: "AUTO", "PlattScaling", "IsotonicRegression". Defaults to AUTO.
#' @param distribution Distribution. This argument is deprecated and has no use for Random Forest.
#' @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
#' @param export_checkpoints_dir Automatically export generated models to this directory.
#' @param check_constant_response \code{Logical}. Check if the response column is constant. If enabled, an exception is thrown when the response
#'        column is a constant value. If disabled, the model will train regardless of whether the response column is
#'        constant. Defaults to TRUE.
#' @param gainslift_bins Number of bins for the Gains/Lift table. 0 means disabled; the default of -1 means automatic binning. Defaults to -1.
#' @param auc_type Set default multinomial AUC type. Must be one of: "AUTO", "NONE", "MACRO_OVR", "WEIGHTED_OVR", "MACRO_OVO",
#'        "WEIGHTED_OVO". Defaults to AUTO.
#' @param verbose \code{Logical}. Print scoring history to the console (Metrics per tree). Defaults to FALSE.
#' @return Creates a \linkS4class{H2OModel} object of the right type.
#' @seealso \code{\link{predict.H2OModel}} for prediction
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' 
#' # Import the cars dataset
#' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv"
#' cars <- h2o.importFile(f)
#' 
#' # Set predictors and response; set response as a factor
#' cars["economy_20mpg"] <- as.factor(cars["economy_20mpg"])
#' predictors <- c("displacement", "power", "weight", "acceleration", "year")
#' response <- "economy_20mpg"
#' 
#' # Train the DRF model
#' cars_drf <- h2o.randomForest(x = predictors, y = response,
#'                              training_frame = cars, nfolds = 5,
#'                              seed = 1234)
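#' 
#' # A minimal follow-up sketch: inspect the cross-validated performance and
#' # score a frame (here the training frame itself, purely for illustration)
#' perf <- h2o.performance(cars_drf, xval = TRUE)
#' pred <- h2o.predict(cars_drf, cars)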
#' }
#' @export
h2o.randomForest <- function(x,
                             y,
                             training_frame,
                             model_id = NULL,
                             validation_frame = NULL,
                             nfolds = 0,
                             keep_cross_validation_models = TRUE,
                             keep_cross_validation_predictions = FALSE,
                             keep_cross_validation_fold_assignment = FALSE,
                             score_each_iteration = FALSE,
                             score_tree_interval = 0,
                             fold_assignment = c("AUTO", "Random", "Modulo", "Stratified"),
                             fold_column = NULL,
                             ignore_const_cols = TRUE,
                             offset_column = NULL,
                             weights_column = NULL,
                             balance_classes = FALSE,
                             class_sampling_factors = NULL,
                             max_after_balance_size = 5.0,
                             ntrees = 50,
                             max_depth = 20,
                             min_rows = 1,
                             nbins = 20,
                             nbins_top_level = 1024,
                             nbins_cats = 1024,
                             r2_stopping = 1.797693135e+308,
                             stopping_rounds = 0,
                             stopping_metric = c("AUTO", "deviance", "logloss", "MSE", "RMSE", "MAE", "RMSLE", "AUC", "AUCPR", "lift_top_group", "misclassification", "mean_per_class_error", "custom", "custom_increasing"),
                             stopping_tolerance = 0.001,
                             max_runtime_secs = 0,
                             seed = -1,
                             build_tree_one_node = FALSE,
                             mtries = -1,
                             sample_rate = 0.632,
                             sample_rate_per_class = NULL,
                             binomial_double_trees = FALSE,
                             checkpoint = NULL,
                             col_sample_rate_change_per_level = 1,
                             col_sample_rate_per_tree = 1,
                             min_split_improvement = 1e-05,
                             histogram_type = c("AUTO", "UniformAdaptive", "Random", "QuantilesGlobal", "RoundRobin", "UniformRobust"),
                             categorical_encoding = c("AUTO", "Enum", "OneHotInternal", "OneHotExplicit", "Binary", "Eigen", "LabelEncoder", "SortByResponse", "EnumLimited"),
                             calibrate_model = FALSE,
                             calibration_frame = NULL,
                             calibration_method = c("AUTO", "PlattScaling", "IsotonicRegression"),
                             distribution = c("AUTO", "bernoulli", "multinomial", "gaussian", "poisson", "gamma", "tweedie", "laplace", "quantile", "huber"),
                             custom_metric_func = NULL,
                             export_checkpoints_dir = NULL,
                             check_constant_response = TRUE,
                             gainslift_bins = -1,
                             auc_type = c("AUTO", "NONE", "MACRO_OVR", "WEIGHTED_OVR", "MACRO_OVO", "WEIGHTED_OVO"),
                             verbose = FALSE)
{
  # Validate required training_frame first and other frame args: should be a valid key or an H2OFrame object
  training_frame <- .validate.H2OFrame(training_frame, required=TRUE)
  validation_frame <- .validate.H2OFrame(validation_frame, required=FALSE)

  # Validate other required args
  # If x is missing, then assume user wants to use all columns as features.
  if (missing(x)) {
     if (is.numeric(y)) {
         # y is a column index: take the set of integer column indices
         # (seq_len(ncol(...)) rather than col(), which would materialize a
         # full nrow-by-ncol index matrix)
         x <- setdiff(seq_len(ncol(training_frame)), y)
     } else {
         x <- setdiff(colnames(training_frame), y)
     }
  }

  # Build parameter list to send to model builder
  parms <- list()
  parms$training_frame <- training_frame
  args <- .verify_dataxy(training_frame, x, y)
  if( !missing(offset_column) && !is.null(offset_column))  args$x_ignore <- args$x_ignore[!( offset_column == args$x_ignore )]
  if( !missing(weights_column) && !is.null(weights_column)) args$x_ignore <- args$x_ignore[!( weights_column == args$x_ignore )]
  if( !missing(fold_column) && !is.null(fold_column)) args$x_ignore <- args$x_ignore[!( fold_column == args$x_ignore )]
  parms$ignored_columns <- args$x_ignore
  parms$response_column <- args$y

  if (!missing(model_id))
    parms$model_id <- model_id
  if (!missing(validation_frame))
    parms$validation_frame <- validation_frame
  if (!missing(nfolds))
    parms$nfolds <- nfolds
  if (!missing(keep_cross_validation_models))
    parms$keep_cross_validation_models <- keep_cross_validation_models
  if (!missing(keep_cross_validation_predictions))
    parms$keep_cross_validation_predictions <- keep_cross_validation_predictions
  if (!missing(keep_cross_validation_fold_assignment))
    parms$keep_cross_validation_fold_assignment <- keep_cross_validation_fold_assignment
  if (!missing(score_each_iteration))
    parms$score_each_iteration <- score_each_iteration
  if (!missing(score_tree_interval))
    parms$score_tree_interval <- score_tree_interval
  if (!missing(fold_assignment))
    parms$fold_assignment <- fold_assignment
  if (!missing(fold_column))
    parms$fold_column <- fold_column
  if (!missing(ignore_const_cols))
    parms$ignore_const_cols <- ignore_const_cols
  if (!missing(weights_column))
    parms$weights_column <- weights_column
  if (!missing(balance_classes))
    parms$balance_classes <- balance_classes
  if (!missing(class_sampling_factors))
    parms$class_sampling_factors <- class_sampling_factors
  if (!missing(max_after_balance_size))
    parms$max_after_balance_size <- max_after_balance_size
  if (!missing(ntrees))
    parms$ntrees <- ntrees
  if (!missing(max_depth))
    parms$max_depth <- max_depth
  if (!missing(min_rows))
    parms$min_rows <- min_rows
  if (!missing(nbins))
    parms$nbins <- nbins
  if (!missing(nbins_top_level))
    parms$nbins_top_level <- nbins_top_level
  if (!missing(nbins_cats))
    parms$nbins_cats <- nbins_cats
  if (!missing(r2_stopping))
    parms$r2_stopping <- r2_stopping
  if (!missing(stopping_rounds))
    parms$stopping_rounds <- stopping_rounds
  if (!missing(stopping_metric))
    parms$stopping_metric <- stopping_metric
  if (!missing(stopping_tolerance))
    parms$stopping_tolerance <- stopping_tolerance
  if (!missing(max_runtime_secs))
    parms$max_runtime_secs <- max_runtime_secs
  if (!missing(seed))
    parms$seed <- seed
  if (!missing(build_tree_one_node))
    parms$build_tree_one_node <- build_tree_one_node
  if (!missing(mtries))
    parms$mtries <- mtries
  if (!missing(sample_rate))
    parms$sample_rate <- sample_rate
  if (!missing(sample_rate_per_class))
    parms$sample_rate_per_class <- sample_rate_per_class
  if (!missing(binomial_double_trees))
    parms$binomial_double_trees <- binomial_double_trees
  if (!missing(checkpoint))
    parms$checkpoint <- checkpoint
  if (!missing(col_sample_rate_change_per_level))
    parms$col_sample_rate_change_per_level <- col_sample_rate_change_per_level
  if (!missing(col_sample_rate_per_tree))
    parms$col_sample_rate_per_tree <- col_sample_rate_per_tree
  if (!missing(min_split_improvement))
    parms$min_split_improvement <- min_split_improvement
  if (!missing(histogram_type))
    parms$histogram_type <- histogram_type
  if (!missing(categorical_encoding))
    parms$categorical_encoding <- categorical_encoding
  if (!missing(calibrate_model))
    parms$calibrate_model <- calibrate_model
  if (!missing(calibration_frame))
    parms$calibration_frame <- calibration_frame
  if (!missing(calibration_method))
    parms$calibration_method <- calibration_method
  if (!missing(custom_metric_func))
    parms$custom_metric_func <- custom_metric_func
  if (!missing(export_checkpoints_dir))
    parms$export_checkpoints_dir <- export_checkpoints_dir
  if (!missing(check_constant_response))
    parms$check_constant_response <- check_constant_response
  if (!missing(gainslift_bins))
    parms$gainslift_bins <- gainslift_bins
  if (!missing(auc_type))
    parms$auc_type <- auc_type

  if (!missing(distribution)) {
    warning("Argument distribution is deprecated and has no use for Random Forest.")
    parms$distribution <- 'AUTO'
  }
  if (!missing(offset_column)) {
    warning("Argument offset_column is deprecated and has no use for Random Forest.")
    parms$offset_column <- NULL
  }

  # Error check and build model
  model <- .h2o.modelJob('drf', parms, h2oRestApiVersion=3, verbose=verbose)
  return(model)
}
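
# A hedged usage sketch, kept as comments so this file stays free of
# top-level side effects: early stopping using the stopping_* parameters
# documented above. `predictors`, `response`, and `cars` are assumed to be
# defined as in the roxygen example.
#
#   drf_es <- h2o.randomForest(x = predictors, y = response,
#                              training_frame = cars,
#                              ntrees = 500,             # upper bound; may stop earlier
#                              score_tree_interval = 5,  # score every 5 trees
#                              stopping_rounds = 3,      # moving-average window
#                              stopping_metric = "logloss",
#                              stopping_tolerance = 1e-3,
#                              seed = 1234)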
.h2o.train_segments_randomforest <- function(x,
                                             y,
                                             training_frame,
                                             validation_frame = NULL,
                                             nfolds = 0,
                                             keep_cross_validation_models = TRUE,
                                             keep_cross_validation_predictions = FALSE,
                                             keep_cross_validation_fold_assignment = FALSE,
                                             score_each_iteration = FALSE,
                                             score_tree_interval = 0,
                                             fold_assignment = c("AUTO", "Random", "Modulo", "Stratified"),
                                             fold_column = NULL,
                                             ignore_const_cols = TRUE,
                                             offset_column = NULL,
                                             weights_column = NULL,
                                             balance_classes = FALSE,
                                             class_sampling_factors = NULL,
                                             max_after_balance_size = 5.0,
                                             ntrees = 50,
                                             max_depth = 20,
                                             min_rows = 1,
                                             nbins = 20,
                                             nbins_top_level = 1024,
                                             nbins_cats = 1024,
                                             r2_stopping = 1.797693135e+308,
                                             stopping_rounds = 0,
                                             stopping_metric = c("AUTO", "deviance", "logloss", "MSE", "RMSE", "MAE", "RMSLE", "AUC", "AUCPR", "lift_top_group", "misclassification", "mean_per_class_error", "custom", "custom_increasing"),
                                             stopping_tolerance = 0.001,
                                             max_runtime_secs = 0,
                                             seed = -1,
                                             build_tree_one_node = FALSE,
                                             mtries = -1,
                                             sample_rate = 0.632,
                                             sample_rate_per_class = NULL,
                                             binomial_double_trees = FALSE,
                                             checkpoint = NULL,
                                             col_sample_rate_change_per_level = 1,
                                             col_sample_rate_per_tree = 1,
                                             min_split_improvement = 1e-05,
                                             histogram_type = c("AUTO", "UniformAdaptive", "Random", "QuantilesGlobal", "RoundRobin", "UniformRobust"),
                                             categorical_encoding = c("AUTO", "Enum", "OneHotInternal", "OneHotExplicit", "Binary", "Eigen", "LabelEncoder", "SortByResponse", "EnumLimited"),
                                             calibrate_model = FALSE,
                                             calibration_frame = NULL,
                                             calibration_method = c("AUTO", "PlattScaling", "IsotonicRegression"),
                                             distribution = c("AUTO", "bernoulli", "multinomial", "gaussian", "poisson", "gamma", "tweedie", "laplace", "quantile", "huber"),
                                             custom_metric_func = NULL,
                                             export_checkpoints_dir = NULL,
                                             check_constant_response = TRUE,
                                             gainslift_bins = -1,
                                             auc_type = c("AUTO", "NONE", "MACRO_OVR", "WEIGHTED_OVR", "MACRO_OVO", "WEIGHTED_OVO"),
                                             segment_columns = NULL,
                                             segment_models_id = NULL,
                                             parallelism = 1)
{
  # formally define variables that were excluded from function parameters
  model_id <- NULL
  verbose <- NULL
  destination_key <- NULL
  # Validate required training_frame first and other frame args: should be a valid key or an H2OFrame object
  training_frame <- .validate.H2OFrame(training_frame, required=TRUE)
  validation_frame <- .validate.H2OFrame(validation_frame, required=FALSE)

  # Validate other required args
  # If x is missing, then assume user wants to use all columns as features.
  if (missing(x)) {
     if (is.numeric(y)) {
         # y is a column index: take the set of integer column indices
         # (seq_len(ncol(...)) rather than col(), which would materialize a
         # full nrow-by-ncol index matrix)
         x <- setdiff(seq_len(ncol(training_frame)), y)
     } else {
         x <- setdiff(colnames(training_frame), y)
     }
  }

  # Build parameter list to send to model builder
  parms <- list()
  parms$training_frame <- training_frame
  args <- .verify_dataxy(training_frame, x, y)
  if( !missing(offset_column) && !is.null(offset_column))  args$x_ignore <- args$x_ignore[!( offset_column == args$x_ignore )]
  if( !missing(weights_column) && !is.null(weights_column)) args$x_ignore <- args$x_ignore[!( weights_column == args$x_ignore )]
  if( !missing(fold_column) && !is.null(fold_column)) args$x_ignore <- args$x_ignore[!( fold_column == args$x_ignore )]
  parms$ignored_columns <- args$x_ignore
  parms$response_column <- args$y

  if (!missing(validation_frame))
    parms$validation_frame <- validation_frame
  if (!missing(nfolds))
    parms$nfolds <- nfolds
  if (!missing(keep_cross_validation_models))
    parms$keep_cross_validation_models <- keep_cross_validation_models
  if (!missing(keep_cross_validation_predictions))
    parms$keep_cross_validation_predictions <- keep_cross_validation_predictions
  if (!missing(keep_cross_validation_fold_assignment))
    parms$keep_cross_validation_fold_assignment <- keep_cross_validation_fold_assignment
  if (!missing(score_each_iteration))
    parms$score_each_iteration <- score_each_iteration
  if (!missing(score_tree_interval))
    parms$score_tree_interval <- score_tree_interval
  if (!missing(fold_assignment))
    parms$fold_assignment <- fold_assignment
  if (!missing(fold_column))
    parms$fold_column <- fold_column
  if (!missing(ignore_const_cols))
    parms$ignore_const_cols <- ignore_const_cols
  if (!missing(weights_column))
    parms$weights_column <- weights_column
  if (!missing(balance_classes))
    parms$balance_classes <- balance_classes
  if (!missing(class_sampling_factors))
    parms$class_sampling_factors <- class_sampling_factors
  if (!missing(max_after_balance_size))
    parms$max_after_balance_size <- max_after_balance_size
  if (!missing(ntrees))
    parms$ntrees <- ntrees
  if (!missing(max_depth))
    parms$max_depth <- max_depth
  if (!missing(min_rows))
    parms$min_rows <- min_rows
  if (!missing(nbins))
    parms$nbins <- nbins
  if (!missing(nbins_top_level))
    parms$nbins_top_level <- nbins_top_level
  if (!missing(nbins_cats))
    parms$nbins_cats <- nbins_cats
  if (!missing(r2_stopping))
    parms$r2_stopping <- r2_stopping
  if (!missing(stopping_rounds))
    parms$stopping_rounds <- stopping_rounds
  if (!missing(stopping_metric))
    parms$stopping_metric <- stopping_metric
  if (!missing(stopping_tolerance))
    parms$stopping_tolerance <- stopping_tolerance
  if (!missing(max_runtime_secs))
    parms$max_runtime_secs <- max_runtime_secs
  if (!missing(seed))
    parms$seed <- seed
  if (!missing(build_tree_one_node))
    parms$build_tree_one_node <- build_tree_one_node
  if (!missing(mtries))
    parms$mtries <- mtries
  if (!missing(sample_rate))
    parms$sample_rate <- sample_rate
  if (!missing(sample_rate_per_class))
    parms$sample_rate_per_class <- sample_rate_per_class
  if (!missing(binomial_double_trees))
    parms$binomial_double_trees <- binomial_double_trees
  if (!missing(checkpoint))
    parms$checkpoint <- checkpoint
  if (!missing(col_sample_rate_change_per_level))
    parms$col_sample_rate_change_per_level <- col_sample_rate_change_per_level
  if (!missing(col_sample_rate_per_tree))
    parms$col_sample_rate_per_tree <- col_sample_rate_per_tree
  if (!missing(min_split_improvement))
    parms$min_split_improvement <- min_split_improvement
  if (!missing(histogram_type))
    parms$histogram_type <- histogram_type
  if (!missing(categorical_encoding))
    parms$categorical_encoding <- categorical_encoding
  if (!missing(calibrate_model))
    parms$calibrate_model <- calibrate_model
  if (!missing(calibration_frame))
    parms$calibration_frame <- calibration_frame
  if (!missing(calibration_method))
    parms$calibration_method <- calibration_method
  if (!missing(custom_metric_func))
    parms$custom_metric_func <- custom_metric_func
  if (!missing(export_checkpoints_dir))
    parms$export_checkpoints_dir <- export_checkpoints_dir
  if (!missing(check_constant_response))
    parms$check_constant_response <- check_constant_response
  if (!missing(gainslift_bins))
    parms$gainslift_bins <- gainslift_bins
  if (!missing(auc_type))
    parms$auc_type <- auc_type

  if (!missing(distribution)) {
    warning("Argument distribution is deprecated and has no use for Random Forest.")
    parms$distribution <- 'AUTO'
  }
  if (!missing(offset_column)) {
    warning("Argument offset_column is deprecated and has no use for Random Forest.")
    parms$offset_column <- NULL
  }

  # Build segment-models specific parameters
  segment_parms <- list()
  if (!missing(segment_columns))
    segment_parms$segment_columns <- segment_columns
  if (!missing(segment_models_id))
    segment_parms$segment_models_id <- segment_models_id
  segment_parms$parallelism <- parallelism

  # Error check and build segment models
  segment_models <- .h2o.segmentModelsJob('drf', segment_parms, parms, h2oRestApiVersion=3)
  return(segment_models)
}
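
# A hedged sketch of segmented model training (as comments, to avoid
# top-level side effects). The internal function above is not meant to be
# called directly; the public entry point is h2o.train_segments(), which
# trains one DRF per distinct combination of segment_columns. The column
# name "cylinders" and the algorithm string are illustrative assumptions.
#
#   segments <- h2o.train_segments(algorithm = "randomForest",
#                                  segment_columns = "cylinders",
#                                  x = predictors, y = response,
#                                  training_frame = cars,
#                                  parallelism = 2)
#   as.data.frame(segments)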
