R/RcppExports.R

# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

#' @title K fold (c++)
#' @description Creates a vector of shuffled indices for assigning \code{n} elements to \code{K} folds.
#' @param n number of elements.
#' @param K number of folds.
#' @keywords internal
#'
NULL

#' @title CV ADMM penalized precision matrix estimation (c++)
#' @description Cross validation function for ADMMsigma.
#'
#' @param X option to provide an nxp matrix. Each row corresponds to a single observation and each column contains n observations of a single feature/variable.
#' @param S option to provide a pxp sample covariance matrix (denominator n). If argument is \code{NULL} and \code{X} is provided instead then \code{S} will be computed automatically.
#' @param lam positive tuning parameters for elastic net penalty. If a vector of parameters is provided, they should be in increasing order.
#' @param alpha elastic net mixing parameter contained in [0, 1]. \code{0 = ridge, 1 = lasso}. If a vector of parameters is provided, they should be in increasing order.
#' @param diagonal option to penalize the diagonal elements of the estimated precision matrix (\eqn{\Omega}). Defaults to \code{FALSE}.
#' @param path option to return the regularization path. This option should be used with extreme care if the dimension is large. If set to TRUE, cores will be set to 1 and errors and optimal tuning parameters will be based on the full sample. Defaults to FALSE.
#' @param rho initial step size for ADMM algorithm.
#' @param mu factor for primal and residual norms in the ADMM algorithm. This will be used to adjust the step size \code{rho} after each iteration.
#' @param tau_inc factor by which to increase the step size \code{rho}.
#' @param tau_dec factor by which to decrease the step size \code{rho}.
#' @param crit criterion for convergence (\code{ADMM} or \code{loglik}). If \code{crit = loglik} then iterations will stop when the relative change in log-likelihood is less than \code{tol_abs}. Default is \code{ADMM} and follows the procedure outlined in Boyd et al.
#' @param tol_abs absolute convergence tolerance. Defaults to 1e-4.
#' @param tol_rel relative convergence tolerance. Defaults to 1e-4.
#' @param maxit maximum number of iterations. Defaults to 1e4.
#' @param adjmaxit adjusted maximum number of iterations. During cross validation this option allows the user to adjust the maximum number of iterations after the first \code{lam} tuning parameter has converged (for each \code{alpha}). This option is intended to be paired with \code{warm} starts and allows for "one-step" estimators. Defaults to 1e4.
#' @param K specify the number of folds for cross validation.
#' @param crit_cv cross validation criterion (\code{loglik}, \code{penloglik}, \code{AIC}, or \code{BIC}). Defaults to \code{loglik}.
#' @param start specify \code{warm} or \code{cold} start for cross validation. Default is \code{warm}.
#' @param trace option to display progress of CV. Choose one of \code{progress} to print a progress bar, \code{print} to print completed tuning parameters, or \code{none}.
#' 
#' @return list of returns includes:
#' \item{lam}{optimal \code{lam} tuning parameter.}
#' \item{alpha}{optimal \code{alpha} tuning parameter.}
#' \item{path}{array containing the solution path. Solutions will be ordered in ascending alpha values for each lambda.}
#' \item{min.error}{minimum average cross validation error (cv_crit) for optimal parameters.}
#' \item{avg.error}{average cross validation error (cv_crit) across all folds.}
#' \item{cv.error}{cross validation errors (cv_crit).}
#' 
#' @keywords internal
#'
CV_ADMMc <- function(X, S, lam, alpha, diagonal = FALSE, path = FALSE, rho = 2, mu = 10, tau_inc = 2, tau_dec = 2, crit = "ADMM", tol_abs = 1e-4, tol_rel = 1e-4, maxit = 1e4L, adjmaxit = 1e4L, K = 5L, crit_cv = "loglik", start = "warm", trace = "progress") {
    .Call('_ADMMsigma_CV_ADMMc', PACKAGE = 'ADMMsigma', X, S, lam, alpha, diagonal, path, rho, mu, tau_inc, tau_dec, crit, tol_abs, tol_rel, maxit, adjmaxit, K, crit_cv, start, trace)
}
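
# Editor-added illustrative sketch (not generated by Rcpp::compileAttributes()):
# CV_ADMMc is internal, so it is reached via `:::`. The simulated data and
# tuning grids are assumptions chosen only for demonstration; the block is
# wrapped in `if (FALSE)` so sourcing this file stays side-effect free.
if (FALSE) {
    set.seed(123)
    X <- matrix(rnorm(100 * 5), nrow = 100, ncol = 5)
    # sample covariance with denominator n, as documented above
    S <- crossprod(scale(X, center = TRUE, scale = FALSE)) / nrow(X)
    cv <- ADMMsigma:::CV_ADMMc(X = X, S = S,
                               lam = 10^seq(-2, 2, 0.5),  # increasing order
                               alpha = seq(0, 1, 0.25),   # increasing order
                               K = 5L, trace = "none")
    cv$lam; cv$alpha  # optimal tuning parameters
}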

#' @title CV ridge penalized precision matrix estimation (c++)
#' @description Cross validation function for RIDGEsigma.
#' 
#' @param X option to provide an nxp matrix. Each row corresponds to a single observation and each column contains n observations of a single feature/variable.
#' @param S option to provide a pxp sample covariance matrix (denominator n). If argument is \code{NULL} and \code{X} is provided instead then \code{S} will be computed automatically.
#' @param lam positive tuning parameters for ridge penalty. If a vector of parameters is provided, they should be in increasing order. Defaults to grid of values \code{10^seq(-5, 5, 0.5)}.
#' @param path option to return the regularization path. This option should be used with extreme care if the dimension is large. If set to TRUE, cores will be set to 1 and errors and optimal tuning parameters will be based on the full sample. Defaults to FALSE.
#' @param K specify the number of folds for cross validation.
#' @param trace option to display progress of CV. Choose one of \code{progress} to print a progress bar, \code{print} to print completed tuning parameters, or \code{none}.
#' 
#' @return list of returns includes:
#' \item{lam}{optimal tuning parameter.}
#' \item{path}{array containing the solution path. Solutions are ordered dense to sparse.}
#' \item{min.error}{minimum average cross validation error for optimal parameters.}
#' \item{avg.error}{average cross validation error across all folds.}
#' \item{cv.error}{cross validation errors (negative validation likelihood).}
#'
#' @keywords internal
#'
CV_RIDGEc <- function(X, S, lam, path = FALSE, K = 3L, trace = "none") {
    .Call('_ADMMsigma_CV_RIDGEc', PACKAGE = 'ADMMsigma', X, S, lam, path, K, trace)
}
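
# Editor-added illustrative sketch: the same pattern for the ridge CV routine,
# using the default grid documented for \code{lam}. Simulated data; wrapped in
# `if (FALSE)` to keep sourcing side-effect free.
if (FALSE) {
    set.seed(123)
    X <- matrix(rnorm(50 * 10), nrow = 50, ncol = 10)
    S <- crossprod(scale(X, center = TRUE, scale = FALSE)) / nrow(X)
    cv <- ADMMsigma:::CV_RIDGEc(X = X, S = S, lam = 10^seq(-5, 5, 0.5))
    cv$lam        # optimal tuning parameter
    cv$min.error  # minimum average cross validation error
}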

#' @title CV (no folds) ADMM penalized precision matrix estimation (c++)
#' @description Cross validation (no folds) function for ADMMsigma. This function is to be used with CVP_ADMM.
#'
#' @param n sample size for \code{X_valid} (used to calculate \code{crit_cv}).
#' @param S_train pxp sample covariance matrix for training data (denominator n).
#' @param S_valid pxp sample covariance matrix for validation data (denominator n).
#' @param lam positive tuning parameters for elastic net penalty. If a vector of parameters is provided, they should be in increasing order.
#' @param alpha elastic net mixing parameter contained in [0, 1]. \code{0 = ridge, 1 = lasso}. If a vector of parameters is provided, they should be in increasing order.
#' @param diagonal option to penalize the diagonal elements of the estimated precision matrix (\eqn{\Omega}). Defaults to \code{FALSE}.
#' @param rho initial step size for ADMM algorithm.
#' @param mu factor for primal and residual norms in the ADMM algorithm. This will be used to adjust the step size \code{rho} after each iteration.
#' @param tau_inc factor by which to increase the step size \code{rho}.
#' @param tau_dec factor by which to decrease the step size \code{rho}.
#' @param crit criterion for convergence (\code{ADMM} or \code{loglik}). If \code{crit = loglik} then iterations will stop when the relative change in log-likelihood is less than \code{tol_abs}. Default is \code{ADMM} and follows the procedure outlined in Boyd et al.
#' @param tol_abs absolute convergence tolerance. Defaults to 1e-4.
#' @param tol_rel relative convergence tolerance. Defaults to 1e-4.
#' @param maxit maximum number of iterations. Defaults to 1e4.
#' @param adjmaxit adjusted maximum number of iterations. During cross validation this option allows the user to adjust the maximum number of iterations after the first \code{lam} tuning parameter has converged (for each \code{alpha}). This option is intended to be paired with \code{warm} starts and allows for "one-step" estimators. Defaults to 1e4.
#' @param crit_cv cross validation criterion (\code{loglik}, \code{penloglik}, \code{AIC}, or \code{BIC}). Defaults to \code{loglik}.
#' @param start specify \code{warm} or \code{cold} start for cross validation. Default is \code{warm}.
#' @param trace option to display progress of CV. Choose one of \code{progress} to print a progress bar, \code{print} to print completed tuning parameters, or \code{none}.
#' 
#' @return cross validation errors (cv_crit)
#' 
#' @keywords internal
#'
CVP_ADMMc <- function(n, S_train, S_valid, lam, alpha, diagonal = FALSE, rho = 2, mu = 10, tau_inc = 2, tau_dec = 2, crit = "ADMM", tol_abs = 1e-4, tol_rel = 1e-4, maxit = 1e4L, adjmaxit = 1e4L, crit_cv = "loglik", start = "warm", trace = "progress") {
    .Call('_ADMMsigma_CVP_ADMMc', PACKAGE = 'ADMMsigma', n, S_train, S_valid, lam, alpha, diagonal, rho, mu, tau_inc, tau_dec, crit, tol_abs, tol_rel, maxit, adjmaxit, crit_cv, start, trace)
}
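
# Editor-added illustrative sketch: CVP_ADMMc scores a single train/validation
# split, so the split below and the denominator-n covariance construction are
# demonstration assumptions matching the argument descriptions above.
if (FALSE) {
    set.seed(123)
    X <- matrix(rnorm(100 * 5), nrow = 100, ncol = 5)
    valid <- sample(nrow(X), 20)  # hold out 20 rows for validation
    center <- function(M) scale(M, center = TRUE, scale = FALSE)
    S_train <- crossprod(center(X[-valid, , drop = FALSE])) / (nrow(X) - 20)
    S_valid <- crossprod(center(X[valid, , drop = FALSE])) / 20
    errs <- ADMMsigma:::CVP_ADMMc(n = 20, S_train = S_train, S_valid = S_valid,
                                  lam = 10^seq(-2, 2, 0.5), alpha = c(0, 0.5, 1),
                                  trace = "none")
    errs  # cross validation errors (cv_crit) over the tuning grid
}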

#' @title CV (no folds) RIDGE penalized precision matrix estimation (c++)
#' @description Cross validation (no folds) function for RIDGEsigma. This function is to be used with CVP_RIDGE.
#'
#' @param n sample size for \code{X_valid} (used to calculate \code{CV_error}).
#' @param S_train pxp sample covariance matrix for training data (denominator n).
#' @param S_valid pxp sample covariance matrix for validation data (denominator n).
#' @param lam positive tuning parameters for ridge penalty. If a vector of parameters is provided, they should be in increasing order.
#' @param trace option to display progress of CV. Choose one of \code{progress} to print a progress bar, \code{print} to print completed tuning parameters, or \code{none}.
#' 
#' @return cross validation errors (negative validation likelihood)
#' 
#' @keywords internal
#'
CVP_RIDGEc <- function(n, S_train, S_valid, lam, trace = "none") {
    .Call('_ADMMsigma_CVP_RIDGEc', PACKAGE = 'ADMMsigma', n, S_train, S_valid, lam, trace)
}
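
# Editor-added illustrative sketch: the ridge analogue of the split above;
# the split and denominator-n covariances are again demonstration assumptions.
if (FALSE) {
    set.seed(123)
    X <- matrix(rnorm(60 * 8), nrow = 60, ncol = 8)
    valid <- sample(nrow(X), 15)
    center <- function(M) scale(M, center = TRUE, scale = FALSE)
    S_train <- crossprod(center(X[-valid, , drop = FALSE])) / (nrow(X) - 15)
    S_valid <- crossprod(center(X[valid, , drop = FALSE])) / 15
    errs <- ADMMsigma:::CVP_RIDGEc(n = 15, S_train = S_train,
                                   S_valid = S_valid, lam = 10^seq(-5, 5, 0.5))
    errs  # negative validation likelihoods over the lam grid
}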

#' @title Ridge-penalized precision matrix estimation (c++)
#' @description Ridge-penalized precision matrix estimation via closed-form solution. Augmented from Adam Rothman's STAT 8931 code.
#'
#' @param S sample covariance matrix (denominator n).
#' @param lam tuning parameter for ridge penalty.
#' 
#' @return estimated Omega
#' 
#' @export
#' 
#' @keywords internal
#'
RIDGEc <- function(S, lam) {
    .Call('_ADMMsigma_RIDGEc', PACKAGE = 'ADMMsigma', S, lam)
}
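
# Editor-added hedged sketch: the closed form is not spelled out above.
# Assuming the squared-Frobenius penalty lam * ||Omega||_F^2, the stationarity
# condition S - solve(Omega) + 2 * lam * Omega = 0 decouples over the
# eigenvalues s of S, giving omega = (-s + sqrt(s^2 + 8 * lam)) / (4 * lam).
# The pure-R version below implements that assumed form and can be compared
# numerically against RIDGEc(S, lam).
if (FALSE) {
    ridge_closed_form <- function(S, lam) {
        es <- eigen(S, symmetric = TRUE)
        omega <- (-es$values + sqrt(es$values^2 + 8 * lam)) / (4 * lam)
        es$vectors %*% diag(omega) %*% t(es$vectors)
    }
    set.seed(123)
    X <- matrix(rnorm(50 * 10), nrow = 50, ncol = 10)
    S <- crossprod(scale(X, center = TRUE, scale = FALSE)) / nrow(X)
    max(abs(ridge_closed_form(S, 0.1) - RIDGEc(S, 0.1)))  # ideally near zero
}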

#' @title Penalized precision matrix estimation via ADMM (c++)
#' 
#' @description Penalized precision matrix estimation using the ADMM algorithm
#' 
#' @details For details on the implementation of 'ADMMsigma', see the vignette
#' \url{https://mgallow.github.io/ADMMsigma/}.
#'
#' @param S pxp sample covariance matrix (denominator n).
#' @param initOmega initialization matrix for Omega
#' @param initZ initialization matrix for Z
#' @param initY initialization matrix for Y
#' @param lam positive tuning parameter for elastic net penalty.
#' @param alpha elastic net mixing parameter contained in [0, 1]. \code{0 = ridge, 1 = lasso}. Defaults to alpha = 1.
#' @param diagonal option to penalize the diagonal elements of the estimated precision matrix (\eqn{\Omega}). Defaults to \code{FALSE}.
#' @param rho initial step size for ADMM algorithm.
#' @param mu factor for primal and residual norms in the ADMM algorithm. This will be used to adjust the step size \code{rho} after each iteration.
#' @param tau_inc factor by which to increase the step size \code{rho}.
#' @param tau_dec factor by which to decrease the step size \code{rho}.
#' @param crit criterion for convergence (\code{ADMM} or \code{loglik}). If \code{crit = loglik} then iterations will stop when the relative change in log-likelihood is less than \code{tol_abs}. Default is \code{ADMM} and follows the procedure outlined in Boyd et al.
#' @param tol_abs absolute convergence tolerance. Defaults to 1e-4.
#' @param tol_rel relative convergence tolerance. Defaults to 1e-4.
#' @param maxit maximum number of iterations. Defaults to 1e4.
#' 
#' @return list of returns includes:
#' \item{Iterations}{number of iterations.}
#' \item{lam}{optimal \code{lam} tuning parameter.}
#' \item{alpha}{optimal \code{alpha} tuning parameter.}
#' \item{Omega}{estimated penalized precision matrix.}
#' \item{Z2}{estimated Z matrix.}
#' \item{Y}{estimated Y matrix.}
#' \item{rho}{estimated rho.}
#' 
#' @references
#' \itemize{
#' \item Boyd, Stephen, Neal Parikh, Eric Chu, Borja Peleato, Jonathan Eckstein, and others. 2011. 'Distributed Optimization and Statistical Learning via the Alternating Direction Method of Multipliers.' \emph{Foundations and Trends in Machine Learning} 3 (1). Now Publishers, Inc.: 1-122. \url{https://web.stanford.edu/~boyd/papers/pdf/admm_distr_stats.pdf}
#' \item Hu, Yue, Chi, Eric C., and Allen, Genevera I. 2016. 'ADMM Algorithmic Regularization Paths for Sparse Statistical Machine Learning.' \emph{Splitting Methods in Communication, Imaging, Science, and Engineering}. Springer: 433-459.
#' \item Zou, Hui and Hastie, Trevor. 2005. 'Regularization and Variable Selection via the Elastic Net.' \emph{Journal of the Royal Statistical Society: Series B (Statistical Methodology)} 67 (2). Wiley Online Library: 301-320.
#' \item Rothman, Adam. 2017. 'STAT 8931 notes on an algorithm to compute the Lasso-penalized Gaussian likelihood precision matrix estimator.'
#' }
#' 
#' @author Matt Galloway \email{gall0441@@umn.edu}
#' 
#' @export
#' 
#' @keywords internal
#'
ADMMc <- function(S, initOmega, initZ, initY, lam, alpha = 1, diagonal = FALSE, rho = 2, mu = 10, tau_inc = 2, tau_dec = 2, crit = "ADMM", tol_abs = 1e-4, tol_rel = 1e-4, maxit = 1e4L) {
    .Call('_ADMMsigma_ADMMc', PACKAGE = 'ADMMsigma', S, initOmega, initZ, initY, lam, alpha, diagonal, rho, mu, tau_inc, tau_dec, crit, tol_abs, tol_rel, maxit)
}
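
# Editor-added illustrative sketch: a "cold start" initialization is assumed
# here (identity matrices for Omega and Z, zeros for Y); the package's R-level
# wrapper may initialize differently.
if (FALSE) {
    set.seed(123)
    X <- matrix(rnorm(100 * 5), nrow = 100, ncol = 5)
    S <- crossprod(scale(X, center = TRUE, scale = FALSE)) / nrow(X)
    p <- ncol(S)
    fit <- ADMMc(S = S, initOmega = diag(p), initZ = diag(p),
                 initY = matrix(0, p, p), lam = 0.1, alpha = 1)
    fit$Omega       # estimated penalized precision matrix
    fit$Iterations  # number of ADMM iterations used
}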
