# @file MLP.R
#
# Copyright 2020 Observational Health Data Sciences and Informatics
#
# This file is part of PatientLevelPrediction
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

#' Create setting for a neural network (multilayer perceptron) model with python
#' @param size       The number of hidden nodes
#' @param alpha      The l2 regularisation
#' @param maxIter    Maximum number of iterations. The solver iterates until convergence (determined by 'tol') or this number of iterations.
#' @param tol        Tolerance for the optimization
#' @param learningRateInit  The initial learning rate used. It controls the step-size in updating the weights.
#' @param nIterNoChange     Maximum number of epochs without at least tol improvement before training stops early.
#' @param beta1    Exponential decay rate for estimates of first moment vector in adam, should be in [0, 1).
#' @param beta2    Exponential decay rate for estimates of second moment vector in adam, should be in [0, 1).
#' @param epsilon  Value for numerical stability in adam.
#' @param seed       A seed for the model 
#'
#' @examples
#' \dontrun{
#' model.mlp <- setMLP(size=4, alpha=0.00001, seed=NULL)
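#' # hypothetical grid: one candidate model per size/alpha combination
#' model.mlp.grid <- setMLP(size = c(4, 8), alpha = c(0.001, 0.0001), seed = 42)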
#' }
#' @export
setMLP <- function(size=4, alpha=c(0.3,0.01,0.001,0.000001), maxIter = 2000, tol = 0.0001, 
                   learningRateInit = 0.001, nIterNoChange = 10,
                   beta1 = 0.9, beta2 = 0.999, epsilon = c(1,0.1,0.00000001), seed=NULL){
  
  if(!class(seed)%in%c('numeric','NULL','integer'))
    stop('Invalid seed')
  if(!class(size) %in% c("numeric", "integer"))
    stop('size must be a numeric value >0 ')
  if(min(size) < 1)
    stop('size must be greater than 0')
  if(!class(alpha) %in% c("numeric", "integer"))
    stop('alpha must be a numeric value >0')
  if(min(alpha) <= 0)
    stop('alpha must be greater than 0')
  
  if(!class(maxIter)%in%c('numeric','integer'))
    stop('Invalid maxIter')
  if(!class(tol)%in%c('numeric','integer'))
    stop('Invalid tol')
  if(!class(learningRateInit)%in%c('numeric','integer'))
    stop('Invalid learningRateInit')
  if(!class(nIterNoChange)%in%c('numeric','integer'))
    stop('Invalid nIterNoChange') 
  
  if(!class(beta1)%in%c('numeric','integer'))
    stop('Invalid beta1') 
  if(!class(beta2)%in%c('numeric','integer'))
    stop('Invalid beta2') 
  if(!class(epsilon)%in%c('numeric','integer'))
    stop('Invalid epsilon') 
  
  # set seed
  if(is.null(seed[1])){
    seed <- as.integer(sample(100000000,1))
  }
  
  # one candidate setting per row of the hyper-parameter grid
  paramGrid <- expand.grid(size = size,
                           alpha = alpha,
                           maxIter = maxIter,
                           tol = tol,
                           learningRateInit = learningRateInit,
                           nIterNoChange = nIterNoChange,
                           beta1 = beta1,
                           beta2 = beta2,
                           epsilon = epsilon,
                           seed = seed[1])
  
  result <- list(model = 'fitMLP',
                 param = split(paramGrid, 1:nrow(paramGrid)),
                 name = 'Neural network')
  class(result) <- 'modelSettings' 
  
  return(result)
}
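
# A minimal sketch (not run; illustrative values) of how setMLP() expands its
# arguments into the candidate grid: expand.grid() crosses every supplied
# value and split() yields one candidate setting per row.
if(FALSE){
  settings <- setMLP(size = c(4, 8), alpha = c(0.001, 0.0001), seed = 42)
  length(settings$param)   # 4 candidates: 2 sizes x 2 alphas
  settings$param[[1]]      # first candidate, with the scalar defaults attached
}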

fitMLP <- function(population, plpData, param, search='grid', quiet=F,
                   outcomeId, cohortId, ...){
  
  # check that plpData contains valid covariateData
  if (!FeatureExtraction::isCovariateData(plpData$covariateData)){
    stop("Needs correct covariateData")
  }
  
  if(colnames(population)[ncol(population)]!='indexes'){
    warning('indexes column not present as last column - setting all indexes to 1')
    population$indexes <- rep(1, nrow(population))
  }
  
  # connect to python if not connected
  ##initiatePython()
  
  start <- Sys.time()
  
  population$rowIdPython <- population$rowId-1 # -1 to account for python/r index difference
  pPopulation <- as.matrix(population[,c('rowIdPython','outcomeCount','indexes')])
  
  # convert plpData in coo to python:
  x <- toSparseM(plpData,population, map=NULL)
  pydata <- reticulate::r_to_py(x$data)
  
  # save the model to outLoc  TODO: make this an input or temp location?
  outLoc <- createTempModelLoc()
  # clear the existing model pickles
  for(file in dir(outLoc))
    file.remove(file.path(outLoc,file))


  # do cross validation to find hyperParameter
  hyperParamSel <- lapply(param, function(x) do.call(trainMLP, listAppend(x,
                                                                          list(plpData = pydata,
                                                                          population = pPopulation,
                                                                          train=TRUE,
                                                                          quiet = quiet,
                                                                          modelOutput = outLoc)  )))

  
  # gather the grid-search results: one row of per-fold CV AUCs per candidate
  cvAuc <- do.call(rbind, lapply(hyperParamSel, function(x) x$aucCV))
  colnames(cvAuc) <- paste0('fold_auc', 1:ncol(cvAuc))
  auc <- unlist(lapply(hyperParamSel, function(x) x$auc))
  
  cvPrediction <- lapply(hyperParamSel, function(x) x$prediction )
  cvPrediction <- cvPrediction[[which.max(auc)[1]]]
  
  hyperSummary <- cbind(do.call(rbind, param), cvAuc, auc= auc)
  
  # now train the final model and return the coefficients
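  # abs(auc - 0.5) ranks candidates by distance from chance (AUC 0.5), so the
  # most discriminative model is kept even if its predictions are inverted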
  bestInd <- which.max(abs(auc - 0.5))[1]
  finalModel <- do.call(trainMLP, listAppend(param[[bestInd]], 
                                             list(plpData = pydata,
                                                  population = pPopulation,
                                                  train=FALSE,
                                                  quiet = quiet,
                                                  modelOutput = outLoc)
                                             ))
  
  # get the coefs and do a basic variable importance:
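  # (assumption: finalModel[[2]] and finalModel[[3]] are the input-to-hidden
  # and hidden-to-output weight matrices returned by the python code; the
  # product of their absolute values scores each covariate's influence)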
  lev1 <- finalModel[[2]]
  lev2 <- finalModel[[3]]
  vals <- abs(lev1)%*%abs(lev2)
  varImp <- apply(vals, 1, function(x) sum(abs(x)))
  
  covariateRef <- as.data.frame(plpData$covariateData$covariateRef)
  incs <- rep(1, nrow(covariateRef))
  covariateRef$included <- incs
  covariateRef$covariateValue <- unlist(varImp)
  
    
  # select best model and remove the others  (!!!NEED TO EDIT THIS)
  modelTrained <- file.path(outLoc) 
  param.best <- param[[bestInd]]
  
  comp <- Sys.time() - start
  
  # train prediction
  pred <- finalModel[[1]]
  pred[,1] <- pred[,1] + 1 # converting from python to r index
  colnames(pred) <- c('rowId','outcomeCount','indexes', 'value')
  pred <- as.data.frame(pred)
  attr(pred, "metaData") <- list(predictionType="binary")
  prediction <- merge(population, pred[,c('rowId', 'value')], by='rowId')
  
  
  # return model location (!!!NEED TO ADD CV RESULTS HERE)
  result <- list(model = modelTrained,
                 trainCVAuc = list(value = unlist(cvAuc[bestInd,]),
                                   prediction = cvPrediction),
                 hyperParamSearch = hyperSummary,
                 modelSettings = list(model='fitMLP',modelParameters=param.best),
                 metaData = plpData$metaData,
                 populationSettings = attr(population, 'metaData'),
                 outcomeId=outcomeId,
                 cohortId=cohortId,
                 varImp = covariateRef,
                 trainingTime =comp,
                 dense=0,
                 covariateMap=x$map,
                 predictionTrain = prediction
  )
  class(result) <- 'plpModel'
  attr(result, 'type') <- 'pythonReticulate'
  attr(result, 'predictionType') <- 'binary'
  
  return(result)
}
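
# A hedged sketch of typical use: fitMLP() is dispatched by the
# PatientLevelPrediction framework rather than called directly, with the
# settings from setMLP() passed as modelSettings; the runPlp() interface
# below is assumed from the package of the same era.
if(FALSE){
  modelSettings <- setMLP(size = c(4, 8), alpha = 0.001, seed = 42)
  plpResult <- runPlp(population = population,
                      plpData = plpData,
                      modelSettings = modelSettings)
}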


trainMLP <- function(plpData, population, size=1, alpha=0.001, maxIter = 2000, tol = 0.0001, 
                     learningRateInit = 0.001, nIterNoChange=10, beta1 =0.9, beta2=0.999, epsilon =0.00000001,
                     seed=NULL, train=TRUE, quiet=F, modelOutput){

  e <- environment()
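  # source the package's python code so that train_mlp() is defined in this environment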
  reticulate::source_python(system.file(package='PatientLevelPrediction','python','mlpFunctions.py'), envir = e)
  
  result <- train_mlp(population = population, 
                      plpData = plpData, 
                      alpha =alpha, 
                      size = as.integer(size), 
                      maxIter = as.integer(maxIter),
                      tol = tol, 
                      learningRateInit = learningRateInit,
                      nIterNoChange = as.integer(nIterNoChange),
                      beta1 = beta1, 
                      beta2 = beta2, 
                      epsilon = epsilon,
                      seed = as.integer(seed), 
                      quiet = quiet, 
                      modelOutput = modelOutput,
                      train = train)
  
  if(train){
    # then get the prediction 
    pred <- result
    colnames(pred) <- c('rowId','outcomeCount','indexes', 'value')
    pred <- as.data.frame(pred)
    attr(pred, "metaData") <- list(predictionType="binary")
    
    aucCV <- lapply(1:max(pred$indexes), function(i){computeAuc(pred[pred$indexes==i,])})
    
    auc <- computeAuc(pred)
    writeLines(paste0('Model obtained CV AUC of ', auc))
    return(list(auc = auc, aucCV = aucCV, prediction = pred[pred$indexes>0,]))
  }
  
  return(result)
}
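
# A minimal sketch (assumed column layout, illustrative values) of the
# per-fold AUC computation in trainMLP(): computeAuc() is applied to each
# cross-validation fold's subset of the prediction frame, with `indexes`
# identifying the fold.
if(FALSE){
  pred <- data.frame(rowId = 1:6,
                     outcomeCount = c(0, 1, 0, 1, 0, 1),
                     indexes = c(1, 1, 2, 2, 3, 3),
                     value = runif(6))
  attr(pred, "metaData") <- list(predictionType = "binary")
  lapply(1:max(pred$indexes), function(i){computeAuc(pred[pred$indexes==i,])})
}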