knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>",
  message = FALSE,
  fig.path = "README-"
)
writeLines(capture.output(devtools::session_info()), "session_info.txt")

hyperband in R6

This is a very generic R6 implementation of the hyperband algorithm for hyperparameter optimization (https://arxiv.org/pdf/1603.06560.pdf)

The project is not yet finished, but it can already be used on your own problems and should work with any other R package/algorithm, as long as the problem is suitable for hyperband.

Please check the vignette folder for an in-depth explanation and exhaustive examples of how to use the package, in particular how to exploit the R6 class system to combine hyperband with MBO.

#######################################
############## packages ###############
#######################################

library("R6")
library("devtools")
load_all()
library("mxnet")
library("mlr") # you might need to install mxnet branch of mlr: devtools::install_github("mlr-org/mlr", ref = "mxnet")
library("ggplot2")
library("gridExtra")
library("dplyr")
library("data.table")

####################################
## define the problem to optimize ##
####################################

# read mini_mnist (1/10 of actual mnist for faster evaluation, evenly distributed classes)
train = fread("mnist/train.csv", header = TRUE)
test = fread("mnist/test.csv", header = TRUE)

# Some operations to normalize features
mnist = as.data.frame(rbind(train, test))
mnist = mnist[sample(nrow(mnist)), ]
mnist[, 2:785] = lapply(mnist[, 2:785], function(x) x/255)

# keep a transposed copy of the scaled training pixels for the visualization below
train.x = train[, -1]
train.x = t(train.x/255)

Let us use hyperbandr to tune the hyperparameters of a neural network on the famous MNIST data (LeCun & Cortes 2010).

To do this, we use mxnet and mlr.

\vspace{10pt}

# visualize a random sample of 64 digits from the training data
plots = 64
visualize_this = sample(dim(train.x)[2], plots)
par(mfrow = c(8, plots/8), cex = 0.05)

for(i in 1:plots){
  train_vis = train.x[1:784, visualize_this[i]]
  train_mat = matrix(train_vis, nrow = 28, ncol = 28, byrow = TRUE)
  train_mat = apply(train_mat, 2 , rev)
  image(t(train_mat), axes = FALSE, col = grey(seq(from = 0, to = 1, length = 255)))
}

rm(train)
rm(train.x)
rm(test)

\vspace{10pt}

For convenience, we only use 1/10 of the original data.

\vspace{10pt}

# We sample 2/3 of our data for training
train.set = sample(nrow(mnist), size = (2/3)*nrow(mnist))

# Another 1/6 will be used for validation during training
val.set = sample(setdiff(1:nrow(mnist), train.set), 1000)

# The remaining 1/6 will be stored for testing
test.set = setdiff(1:nrow(mnist), c(train.set, val.set))

# Since we use mlr, we define a classification task to encapsulate the data
task = makeClassifTask(data = mnist, target = "label")

# Finally, we define the problem list
problem = list(data = task, train = train.set, val = val.set, test = test.set)

\vspace{10pt}

First, we define a search space. The ParamHelpers package provides an easy way to construct one.

\vspace{10pt}

library("ParamHelpers")
configSpace = makeParamSet(
  makeDiscreteParam(id = "optimizer", values = c("sgd", "rmsprop", "adam", "adagrad")),
  makeNumericParam(id = "learning.rate", lower = 0.001, upper = 0.1),
  makeNumericParam(id = "wd", lower = 0, upper = 0.01),
  makeNumericParam(id = "dropout.input", lower = 0, upper = 0.6),
  makeNumericParam(id = "dropout.layer1", lower = 0, upper = 0.6),
  makeNumericParam(id = "dropout.layer2", lower = 0, upper = 0.6),
  makeNumericParam(id = "dropout.layer3", lower = 0, upper = 0.6),
  makeLogicalParam(id = "batch.normalization1"),
  makeLogicalParam(id = "batch.normalization2"),
  makeLogicalParam(id = "batch.normalization3")
)

\vspace{10pt}

Now we need a function to sample configurations from our search space.

\vspace{10pt}

sample.fun = function(par.set, n.configs, ...) {
  # sample from the par.set and remove all NAs
  lapply(sampleValues(par = par.set, n = n.configs), function(x) x[!is.na(x)])
}
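
For illustration (this snippet is not part of the original walkthrough), we can draw a couple of configurations to see what the sampler returns:

# draw two candidate configurations from the search space (output will vary)
example.configs = sample.fun(par.set = configSpace, n.configs = 2)
str(example.configs, max.level = 2)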

\vspace{10pt}

.. as well as a function to initialize models ..

\vspace{10pt}

init.fun = function(r, config, problem) {
  lrn = makeLearner("classif.mxff",
    # you may need the GPU build of mxnet; otherwise just set ctx = mx.cpu()
    ctx = mx.gpu(),
    # we define a small CNN architecture with two conv and two dense layers
    # (the second dense layer is our output and will be created automatically by mlr)
    layers = 3, 
    conv.layer1 = TRUE, conv.layer2 = TRUE,
    conv.data.shape = c(28, 28),
    num.layer1 = 8, num.layer2 = 16, num.layer3 = 64,
    conv.kernel1 = c(3,3), conv.stride1 = c(1,1), 
    pool.kernel1 = c(2,2), pool.stride1 = c(2,2),
    conv.kernel2 = c(3,3), conv.stride2 = c(1,1), 
    pool.kernel2 = c(2,2), pool.stride2 = c(2,2),           
    array.batch.size = 128,
    # we initialize our model with r iterations
    begin.round = 1, num.round = r,
    # here we allocate the configuration from our sample function
    par.vals = config
  )
  mod = train(learner = lrn, task = problem$data, subset = problem$train)
  return(mod)
}

\vspace{10pt}

After each step of successive halving, hyperbandr continues training the remaining models instead of training them from scratch. This greatly reduces training time. Thus, we need a function to continue the training of our models ..

We are planning to add training from scratch as well; that might be necessary if the memory requirements of the architecture become too big.

\vspace{10pt}

train.fun = function(mod, budget, problem) {
  # we create a new learner and assign all parameters from our model
  lrn = makeLearner("classif.mxff", ctx = mx.gpu(), par.vals = mod$learner$par.vals)
  lrn = setHyperPars(lrn,
    # in addition, we have to extract the weights and feed them into our new model 
    symbol = mod$learner.model$symbol,
    arg.params = mod$learner.model$arg.params,
    aux.params = mod$learner.model$aux.params,
    begin.round = mod$learner$par.vals$begin.round + mod$learner$par.vals$num.round,
    num.round = budget
  )
  mod = train(learner = lrn, task = problem$data, subset = problem$train)
  return(mod)
}

\vspace{10pt}

.. and last but not least a function to measure the performance of our model at each step of successive halving:

\vspace{10pt}

performance.fun = function(model, problem) {
  # predict the validation data
  pred = predict(model, task = problem$data, subset = problem$val)
  # we choose accuracy as our performance measure
  performance(pred, measures = acc)
}
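
To make the contract between these functions concrete, here is a small optional check (a sketch, not part of the original walkthrough; it assumes mxnet is set up as in init.fun above) that chains them by hand:

# sample one configuration, build a model with a budget of 1 round,
# evaluate it, then continue training for 2 more rounds and evaluate again
example.config = sample.fun(par.set = configSpace, n.configs = 1)[[1]]
example.mod = init.fun(r = 1, config = example.config, problem = problem)
performance.fun(example.mod, problem)
example.mod = train.fun(example.mod, budget = 2, problem = problem)
performance.fun(example.mod, problem)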

\vspace{10pt}

Now we can call hyperband (with these settings, one run takes about 5 minutes on a GTX 1070):

\vspace{10pt}

hyperhyper = hyperband(
  problem = problem,
  max.resources = 81, 
  prop.discard = 3,
  max.perf = TRUE,
  id = "nnet", 
  par.set = configSpace, 
  sample.fun =  sample.fun,
  init.fun = init.fun,
  train.fun = train.fun, 
  performance.fun = performance.fun)

\vspace{10pt}

With max.resources = 81 and prop.discard = 3, we obtain a total of 5 brackets:

\vspace{10pt}

length(hyperhyper)
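
The bracket count follows from the hyperband paper: with R = max.resources and eta = prop.discard there are floor(log_eta(R)) + 1 brackets. A quick sanity check in plain R (not a package call):

# floor(log_3(81)) + 1 = 4 + 1 = 5; the small epsilon guards against
# floating-point noise in log()
s.max = floor(log(81, base = 3) + 1e-9)
s.max + 1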

\vspace{10pt}

We can inspect the first bracket ..

\vspace{10pt}

hyperhyper[[1]]

\vspace{10pt}

.. and, for instance, check its performance by calling the getPerformances() method:

\vspace{10pt}

hyperhyper[[1]]$getPerformances()

\vspace{10pt}

We can also inspect the architecture of the best model of bracket 1:

\vspace{10pt}

hyperhyper[[1]]$models[[1]]$model

\vspace{10pt}

Let's see which bracket yielded the best performance:

\vspace{10pt}

lapply(hyperhyper, function(x) x$getPerformances())

\vspace{10pt}

We can call the hyperVis function to visualize all brackets:

\vspace{10pt}

hyperVis(hyperhyper, perfLimits = c(0, 1))

\vspace{10pt}

Now we use the best model to predict the test data:

\vspace{10pt}

# pick the bracket whose winning model achieved the best validation performance
best.mod.index = which.max(unlist(lapply(hyperhyper, function(x) x$getPerformances())))
best.mod = hyperhyper[[best.mod.index]]$models[[1]]$model

performance(predict(object = best.mod, task = problem$data, subset = problem$test), 
            measures = acc)
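
Optionally (not part of the original walkthrough), mlr's calculateConfusionMatrix() gives a per-class breakdown of the same test predictions:

# confusion matrix on the held-out test set
pred.test = predict(object = best.mod, task = problem$data, subset = problem$test)
calculateConfusionMatrix(pred.test)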

\vspace{10pt}

The example folder contains six detailed examples.


