Use `unifiedml` for benchmarking models

Classification

# Classification benchmark: compare three learners on the iris data
# through unifiedml's common Model / benchmark() interface.
library(unifiedml)
library(randomForest)
library(e1071)
library(caret)

set.seed(123)  # reproducible cross-validation folds

# Predictors (the four numeric measurements) and the class label.
X <- iris[, 1:4]
y <- iris$Species

# Wrap each backend's fitting function in a unified Model object.
models <- list(
  glm  = Model$new(caret::train),
  rf   = Model$new(randomForest::randomForest),
  svm  = Model$new(e1071::svm)
)

# Backend-specific hyper-parameters, keyed by the model names above.
params <- list(
  # glmnet with alpha = 0 (ridge penalty) and a fixed lambda;
  # trControl = "none" disables caret's internal resampling/tuning.
  glm = list(
    method    = "glmnet",
    tuneGrid  = data.frame(alpha = 0, lambda = 0.01),
    trControl = trainControl(method = "none")
  ),
  rf  = list(ntree = 150),
  # RBF-kernel SVM with fixed cost and gamma.
  svm = list(
    kernel = "radial",
    cost   = 1,
    gamma  = 0.1
  )
)

# 5-fold cross-validated comparison across all three models.
results <- benchmark(models, X, y, cv = 5, params = params)
print(results)

Regression

# Regression benchmark: predict mpg from the remaining mtcars columns
# through unifiedml's common Model / benchmark() interface.
library(unifiedml)
library(randomForest)
library(e1071)
library(caret)

set.seed(123)  # reproducible cross-validation folds

# Regression data: every column except the response "mpg" as predictors.
X <- mtcars[, setdiff(names(mtcars), "mpg")]
y <- mtcars$mpg

# Wrap each backend's fitting function in a unified Model object.
models <- list(
  glm  = Model$new(caret::train),
  rf   = Model$new(randomForest::randomForest),
  svm  = Model$new(e1071::svm)
)

# Backend-specific hyper-parameters, keyed by the model names above.
params <- list(
  # glmnet with alpha = 0 (ridge penalty) and a fixed lambda;
  # trControl = "none" disables caret's internal resampling/tuning.
  glm = list(
    method    = "glmnet",
    tuneGrid  = data.frame(alpha = 0, lambda = 0.01),
    trControl = trainControl(method = "none")
  ),
  rf  = list(ntree = 150),
  # type = "eps-regression" forces e1071::svm into regression mode
  # (with a numeric y it would otherwise default based on the response).
  svm = list(
    type   = "eps-regression",
    kernel = "radial",
    cost   = 1,
    gamma  = 0.1
  )
)

# 5-fold cross-validated comparison across all three models.
results <- benchmark(models, X, y, cv = 5, params = params)
print(results)


Try the unifiedml package in your browser

Any scripts or data that you put into this service are public.

unifiedml documentation built on May 5, 2026, 9:06 a.m.