R/cross-validation.R

#' Cross-validation function of the PO-EN model
#'
#' Does k-fold cross-validation for PO-EN and produces a pair of values, lambda and the prevalence parameter, for an optimal fit.
#'@usage cv.PO.EN(X, Y, alpha=0.5, o.iter=5, i.iter=20,
#'epsilon=1e-4,nfolds=10,type.measure='deviance',
#'depth=100,input.pi=0.5,a=sqrt(0.5),seed=1)
#'@param X Input design matrix. Should not include a column of ones for the intercept.
#'@param Y Response variable. Should be a binary vector.
#'@param o.iter Number of outer-loop iterations.
#'@param i.iter Number of inner-loop iterations.
#'@param alpha The elastic net mixing parameter, with \eqn{0\le}\code{alpha}\eqn{\le 1}.
#'@param epsilon The threshold for stopping the coordinate descent algorithm.
#'@param nfolds The number of folds used for cross-validation. The default is 10. The number of presence observations must be a multiple of \code{nfolds}.
#'@param type.measure The loss function to use for tuning lambda. The default is \code{type.measure='deviance'}. Other choices include
#'                    AUROC (\code{type.measure='auc'}) and F measure (\code{type.measure='F.measure'}).
#'@param depth The ratio between the largest and smallest lambda in the candidate sequence.
#'@param input.pi The user-supplied sequence of candidate prevalence values.
#'@param a The weight parameter of the F measure used for tuning the true prevalence; the default value is \eqn{\sqrt{0.5}}.
#'@param seed A single value passed to \code{set.seed} to make the fold assignment reproducible.
#'@return \tabular{ll}{
#'\tab \cr
#'\code{lambda.min} \tab value of lambda that gives the minimum (or maximum, \cr
#'  \tab depending on \code{type.measure}) mean cross-validated error.\cr
#'  \tab\cr
#'\code{lambda.1se} \tab largest value of lambda such that error is within 1 standard error of the minimum.\cr
#'  \tab \cr
#'\code{pi} \tab value of the prevalence parameter that maximizes the F measure.\cr
#'}
#'@details The cross-validation function runs \code{nfolds}-fold cross-validation to select an optimal pair of lambda and the prevalence parameter.
#'The default is 10-fold cross-validation. The candidate sequence of lambda is generated automatically by the function based on a warm start.
#'The candidate values of \code{input.pi} should be supplied by the user.
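#'The F measure used for tuning is the weighted score \eqn{F_a = (1+a^2) P R / (a^2 P + R)}, where \eqn{P} and \eqn{R} are the precision and recall of the 0.5-cutoff classification on a validation fold.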
#'@examples
#'data(example.data) # example datasets, including training dataset and testing dataset
#'train_data<-example.data$train.data
#'y_train=train_data$response;x_train=train_data[,-1]  # response and design matrix of training data
#'\donttest{PO.EN.cv<-cv.PO.EN(x_train,y_train,input.pi=seq(0.01,0.4,length.out=4))}
#'\dontshow{PO.EN.cv<-list()
#'PO.EN.cv$lambda.min<-0.1
#'PO.EN.cv$pi<-0.2
#'}
#'PO.EN.beta<-PO.EN(x_train,y_train,lambda=PO.EN.cv$lambda.min,
#'            true.prob=PO.EN.cv$pi,beta_start=rep(0,ncol(x_train)+1))
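#'# A hedged sketch (assumption: PO.EN.predict, which cv.PO.EN calls internally on a
#'# coefficient matrix, also accepts the single fitted coefficient vector once wrapped
#'# with as.matrix) of turning the selected fit into presence probabilities:
#'\donttest{fitted.prob<-PO.EN.predict(x_train,as.matrix(PO.EN.beta))}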
cv.PO.EN <- function(X, Y, alpha=0.5, o.iter=5, i.iter=20, epsilon=1e-4, nfolds=10,
                     type.measure='deviance', depth=100, input.pi=0.5, a=sqrt(0.5), seed=1){
  set.seed(seed = seed)

  # Weighted F measure of a binary classification against the observed labels;
  # the weight a is taken from the enclosing call.
  f.fun <- function(classifier, true.y){
    retrieved <- sum(classifier)

    precision <- sum(classifier & true.y) / retrieved
    precision <- ifelse(is.nan(precision), 0, precision)  # no positive calls -> precision 0

    recall <- sum(classifier & true.y) / sum(true.y)

    Fmeasure <- (1 + a^2) * precision * recall / (a^2 * precision + recall)
    if((precision + recall) == 0){
      Fmeasure <- 0
    }

    # Unweighted F score of the trivial classifier that labels every observation
    # positive (precision = prevalence, recall = 1); fits that only match it score 0.
    stand.pre <- sum(true.y) / length(true.y)
    stand.recall <- 1
    F.stand <- 2 * stand.pre / (1 + stand.pre)
    if(Fmeasure == F.stand){
      Fmeasure <- 0
    }
    return(Fmeasure)
  }
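  # Illustrative arithmetic (comment only, not executed): with the default
  # a = sqrt(0.5), precision = recall = 0.5 gives
  #   F = (1 + 0.5) * 0.5 * 0.5 / (0.5 * 0.5 + 0.5) = 0.5.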

  X <- as.matrix(X)
  pi.length <- length(input.pi)
  p <- dim(X)[2]      # number of predictors
  N <- dim(X)[1]      # number of observations

  n.l <- sum(Y == 1)  # number of presence observations
  n.u <- sum(Y != 1)  # number of background (unlabelled) observations
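  # Shuffle the observations until every fold contains at least one presence
  # (Y == 1), so the fold-level deviance, AUROC, and F measure are well defined.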
  repeat{
    index.shuffle<-sample(1:N)
    Y<-Y[index.shuffle]
    indd<-stats::aggregate(Y,by=list(rep(1:nfolds,each=floor(length(Y)/nfolds))),FUN='sum')
    if(all(indd[,2]!=0)){
      break
    }
  }

  X<-X[index.shuffle,]

  start.function<-function(x,y,type.measure=type.measure,start.pi){


    # Upper bound for the lambda sequence: the largest absolute coefficient from
    # single-predictor logistic fits (via glmnet and glm.fit), inflated by a factor of 10.
    glm.y <- function(h){
      en.fit <- glmnet::glmnet(cbind(1, h), y, family = 'binomial', intercept = FALSE, lambda = 0)
      return(as.numeric(en.fit$beta))
    }
    print(dim(x))  # diagnostic output

    coef.glmnet <- apply(as.matrix(x), 2, glm.y)
    glm.re <- function(h){
      glm.re <- stats::glm.fit(h, y, family = stats::binomial(), intercept = TRUE)
      return(as.numeric(glm.re$coefficients))
    }
    coef.glm <- apply(as.matrix(x), 2, glm.re)
    lambda.start <- max(max(abs(coef.glmnet)), max(abs(coef.glm))) * 10
    if(lambda.start == 0){
      lambda.start <- 20
    }

    print(lambda.start)  # diagnostic output




    # Precompute the Gram-matrix quantities reused by every PO.EN call below
    # (passed through gram.input = TRUE).
    X.start.temp <- as.matrix(cbind(1, x))

    XtX.start.i <- t(X.start.temp) %*% X.start.temp
    ytx.start.i <- t(y) %*% X.start.temp
    XtX_reduce.start.i <- c()
    for(o in 1:ncol(XtX.start.i)){
      XtX_reduce.start.i <- cbind(XtX_reduce.start.i, XtX.start.i[o, -o])
    }

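    # Increase lambda.start (by factors of 10) until the penalized fit shrinks
    # every coefficient to zero, so the grid starts above the "all-zero" point.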
    repeat{
      start.s<-PO.EN(x,y,alpha = alpha,o.iter=10, i.iter=5, lambda=lambda.start,true.prob=start.pi,beta_start=rep(0,p+1),epsilon=epsilon,gram.input = TRUE,XtX.input = XtX.start.i,ytx.input=ytx.start.i,XtX_reduce.input =  XtX_reduce.start.i)
      if(sum(start.s[2:ncol(x)]!=0)==0){
        break
      }
      lambda.start=lambda.start*10

    }

    # Initial grid: 100 log-spaced values running down from lambda.start.
    lambda.ratio <- 200^(1/100)
    lambda.run <- lambda.start/(lambda.ratio)^(0:99)



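    # Shift the grid downward until its smallest value yields at least one
    # nonzero coefficient.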
    repeat{
      start.e<-PO.EN(x,y,alpha = alpha,o.iter=10, i.iter=5, lambda=lambda.run[length(lambda.run)],true.prob=start.pi,beta_start=rep(0,p+1),epsilon=epsilon,gram.input = TRUE,XtX.input = XtX.start.i,ytx.input=ytx.start.i,XtX_reduce.input =  XtX_reduce.start.i)
        if(sum(start.e[2:ncol(x)]!=0)!=0){
        break
      }

      lambda.start=lambda.run[length(lambda.run)]

      lambda.ratio<-200^(1/100)
      lambda.run<-lambda.start/(lambda.ratio)^(0:99)

      print(lambda.start)
    }



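    # Walk down the grid from the largest lambda and take the last value that
    # still gives an all-zero fit as the final lambda.start.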
    s=1
    repeat{

      test.fit<-PO.EN(x,y,alpha = alpha,o.iter=10, i.iter=10, lambda=lambda.run[s],true.prob=start.pi,beta_start=rep(0,p+1),epsilon=epsilon,gram.input = TRUE,XtX.input = XtX.start.i,ytx.input=ytx.start.i,XtX_reduce.input =  XtX_reduce.start.i)
      if(sum(test.fit[2:ncol(x)]!=0)!=0){
        break
      }
      s=s+1
       }

    if(s!=1){
      lambda.start<-lambda.run[s-1]
    }else{
      lambda.start<-lambda.run[s]
    }

    print(paste0('Final lambda.start: ', lambda.start))
    # Final grid: 100 log-spaced values from lambda.start down to lambda.start/depth.
    lambda.ratio <- depth^(1/100)
    lambda.run <- c(lambda.start, lambda.start/(lambda.ratio)^(2:100))

    return(lambda.run)
  }

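  # One candidate lambda sequence per candidate prevalence value.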
  lambda_run_list<-list()
  for(i in 1:pi.length){
    lambda_run_list[[i]]<-start.function(X,Y,type.measure=type.measure,start.pi=input.pi[i])
  }


  lambda_store<-c()
  dev.store<-c()
  direct.index='middle'

  result.list.F<-list()
  result.list.auc<-list()
  result.list.dev<-list()
  for(j in 1:pi.length){
    result.matrix.F<-matrix(NA,nrow=length(lambda_run_list[[j]]),ncol=nfolds)
    result.matrix.auc<-matrix(NA,nrow=length(lambda_run_list[[j]]),ncol=nfolds)
    result.matrix.dev<-matrix(NA,nrow=length(lambda_run_list[[j]]),ncol=nfolds)


    for(i in 1:nfolds){

      # Hold out the i-th block as the validation fold; standardize the training
      # block and precompute its Gram-matrix quantities for PO.EN.
      valid.index <- (i - 1) * floor(length(Y)/nfolds) + 1:floor(length(Y)/nfolds)
      X.train <- scale(as.matrix(X)[-valid.index, ])
      X.temp <- cbind(1, X.train)
      Y.train <- Y[-valid.index]
      XtX.i <- t(X.temp) %*% X.temp
      ytx.i <- t(Y.train) %*% X.temp
      XtX_reduce.i <- c()
      for(o in 1:ncol(XtX.i)){
        XtX_reduce.i <- cbind(XtX_reduce.i, XtX.i[o, -o])
      }
      # Fit the whole lambda path for the j-th candidate prevalence on the training fold.
      fit <- PO.EN(X.train, Y.train, alpha = alpha, o.iter = o.iter, i.iter = i.iter,
                   lambda = lambda_run_list[[j]], true.prob = input.pi[j],
                   beta_start = rep(0, p + 1), epsilon = epsilon, gram.input = TRUE,
                   XtX.input = XtX.i, ytx.input = ytx.i, XtX_reduce.input = XtX_reduce.i)


      # Evaluate the fitted path on the standardized validation fold:
      # observed deviance, AUROC, and weighted F measure at a 0.5 cutoff.
      X.t <- scale(as.matrix(X)[valid.index, ])
      Y.t <- Y[valid.index]

      result.matrix.dev[, i] <- PUlasso::deviances(X.t, Y.t, input.pi[j], fit)

      predictions <- PO.EN.predict(X.t, fit)
      result.matrix.auc[, i] <- apply(predictions, 2, function(x){pROC::auc(Y.t, x, quiet = TRUE)})
      classification <- (predictions >= 0.5)
      result.matrix.F[, i] <- apply(classification, 2, function(x){f.fun(x, Y.t)})

    }


    result.list.F[[j]]<-result.matrix.F
    result.list.auc[[j]]<-result.matrix.auc
    result.list.dev[[j]]<-result.matrix.dev

  }


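  # Diagnostic output: the best fold-averaged F measure, deviance, and AUROC
  # (with the index of the lambda attaining each) for every candidate prevalence.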
  for(i in 1:pi.length){
    print(max(rowMeans( result.list.F[[i]])))
    print(which.max(rowMeans( result.list.F[[i]])))
    print(min(rowMeans( result.list.dev[[i]])))
    print(which.min(rowMeans( result.list.dev[[i]])))
    print(max(rowMeans( result.list.auc[[i]])))
    print(which.max(rowMeans( result.list.auc[[i]])))
    print('################')
  }

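  # Select the prevalence whose best (over lambda) fold-averaged F measure is largest.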
  mean.F<-c()
  for(j in 1:pi.length){
    mean.F<-c(mean.F,max(rowMeans( result.list.F[[j]])))
  }
  pi.index<-which.max(mean.F)

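  # For the selected prevalence, pick lambda.min and lambda.1se under the
  # requested type.measure (1-SE rule relative to the best mean value).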
  result.min<-rep(0,pi.length)
  result.1se<-rep(0,pi.length)
  result.measure<-rep(0,pi.length)
  for(j in pi.index){


    if(type.measure=='deviance'){
      result.matrix<-result.list.dev[[j]]/length(Y.t)
      mean.run<-apply(result.matrix,1,mean)
      sd.run<-apply(result.matrix,1,stats::sd)/sqrt(nfolds)
      min.index<-which.min(mean.run)
      lambda.min<-lambda_run_list[[j]][ min.index]
      lambda.1se<-max(lambda_run_list[[j]][which(mean.run<(mean.run[min.index]+sd.run[min.index]))   ])
      result.min<-lambda.min
      result.1se<-lambda.1se
      result.pi<-input.pi[pi.index]
    }
    if(type.measure=='auc'){
      result.matrix<-result.list.auc[[j]]/length(Y.t)
      mean.run<-apply(result.matrix,1,mean)
      sd.run<-apply(result.matrix,1,stats::sd)/sqrt(nfolds)

      max.index<-which.max(mean.run)
      lambda.min<-lambda_run_list[[j]][max.index]
      lambda.1se<-max(lambda_run_list[[j]][which(mean.run>(mean.run[max.index]-sd.run[max.index]))   ])
      result.min<-lambda.min
      result.1se<-lambda.1se
      result.pi<-input.pi[pi.index]
    }
    if(type.measure=='F.measure'){
      result.matrix<-result.list.F[[j]]/length(Y.t)
      mean.run<-apply(result.matrix,1,mean)
      sd.run<-apply(result.matrix,1,stats::sd)/sqrt(nfolds)

      max.index<-which.max(mean.run)
      lambda.min<-lambda_run_list[[j]][max.index]
      lambda.1se<-max(lambda_run_list[[j]][which(mean.run>(mean.run[max.index]-sd.run[max.index]))   ])
      result.min<-lambda.min
      result.1se<-lambda.1se
      result.pi<-input.pi[pi.index]
    }

  }


  return(list(lambda.min=lambda.min,lambda.1se=lambda.1se,pi=result.pi))
}
