R/RcppExports.R

# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

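# The functions below are internal .Call wrappers for the package's
# compiled C++ routines. Those wrapped in invisible() are void on the
# C++ side and write their output into their final argument in place.
# The roles described in the comments are inferred from the function
# and argument names; see the C++ sources in src/ for the
# authoritative definitions.

# Elementwise soft-thresholding (the l1 proximal operator) of beta at
# level lam; the result is stored in `result`.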
soft_threshold <- function(beta, lam, r, result) {
    invisible(.Call('varband_soft_threshold', PACKAGE = 'varband', beta, lam, r, result))
}

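# Closed-form primal update for one row in the ADMM iterations, using
# the cached inverse S_inv; the updated row is stored in `res`.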
close_update <- function(S, S_inv, r, rho, u, gamma, res) {
    invisible(.Call('varband_close_update', PACKAGE = 'varband', S, S_inv, r, rho, u, gamma, res))
}

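# Precomputes a matrix inverse involving S and the ADMM penalty
# parameter rho, stored in S_inv for reuse by close_update().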
inverse_update <- function(S, rho, S_inv) {
    invisible(.Call('varband_inverse_update', PACKAGE = 'varband', S, rho, S_inv))
}

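# Projection onto an ellipsoid (unweighted case), used inside the
# proximal operator of the hierarchical group lasso penalty; the
# projection is stored in `pp`.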
elliproj_u <- function(y, tau, pp) {
    invisible(.Call('varband_elliproj_u', PACKAGE = 'varband', y, tau, pp))
}

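# Scalar root-finding for the Lagrange multiplier defining the
# ellipsoid projection; unlike the wrappers above, it returns its
# result rather than writing in place.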
rootfind <- function(pp, ww, tau, l) {
    .Call('varband_rootfind', PACKAGE = 'varband', pp, ww, tau, l)
}

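# Weighted counterpart of elliproj_u(): projection onto an ellipsoid
# with weights; the projection is stored in `pp`.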
elliproj_w <- function(y, tau, pp) {
    invisible(.Call('varband_elliproj_w', PACKAGE = 'varband', y, tau, pp))
}

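# ADMM solver for a single row subproblem under the hierarchical group
# lasso penalty (weighted if w = TRUE); returns the estimated row.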
rowadmm <- function(S, init_row, lambda, w = FALSE, tol = 1.0e-4, itermax = 1e+6L) {
    .Call('varband_rowadmm', PACKAGE = 'varband', S, init_row, lambda, w, tol, itermax)
}

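# ADMM solver for a single row subproblem under the l1 penalty. Note
# that, unlike rowadmm(), it takes no weight argument.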
rowadmm_lasso <- function(S, init_row, lambda, tol = 1.0e-4, itermax = 1e+6L) {
    .Call('varband_rowadmm_lasso', PACKAGE = 'varband', S, init_row, lambda, tol, itermax)
}

#' Compute the varband estimate for a fixed tuning parameter value with different penalty options.
#'
#' Solves the main optimization problem in Yu & Bien (2017):
#' \deqn{\min_L -2 \sum_{r=1}^p \log L_{rr} + tr(LSL^T) + \lambda \sum_{r=2}^p P_r(L_{r.})}{min_L -2 sum_{r=1}^p log L_{rr} + tr(LSL^T) + lambda * sum_{r=2}^p P_r(L_r.)}
#' where \deqn{P_r(L_{r.}) = \sum_{\ell = 2}^{r-1} \left(\sum_{m=1}^\ell w_{\ell m}^2 L_{rm}^2\right)^{1/2}}{P_r(L_r.) = sum_{l=2}^{r-1} (sum_{m=1}^l w^2_{lm} L^2_{rm})^{1/2}}
#' or \deqn{P_r(L_{r.}) = \sum_{\ell = 1}^{r-1} |L_{r\ell}|}{P_r(L_r.) = sum_{l=1}^{r-1} |L_{rl}|}
#'
#' The objective decomposes into p independent row problems,
#' each of which is solved by an ADMM algorithm; see the paper for
#' details. A standalone sketch of this row-by-row decomposition
#' follows the function definition below.
#' @param S The sample covariance matrix.
#' @param lambda Non-negative tuning parameter. Controls the sparsity level.
#' @param init Initial estimate of L. Default is a closed-form diagonal estimate of L.
#' @param K Integer between 0 and p - 1, indicating the maximum bandwidth of the resulting estimate; the default (encoded as \code{K = -1L} in the signature) corresponds to K = p - 1, i.e., no bandwidth restriction. A small value of K yields a sparser estimate and a shorter computing time.
#' @param w Logical. Should the weighted version of the penalty be used? If \code{TRUE}, general weights are used; if \code{FALSE}, the unweighted penalty is used. Default is \code{FALSE}.
#' @param lasso Logical. Should the l1 penalty be used instead of the hierarchical group lasso penalty? Note that with the l1 penalty the resulting estimate loses its banded structure. Default is \code{FALSE}.
#' @return The varband estimate of L, at most K-banded, where L^T L = Omega is the estimated precision matrix.
#'
#' @examples
#' set.seed(123)
#' n <- 50
#' true <- varband_gen(p = 50, block = 5)
#' x <- sample_gen(L = true, n = n)
#' S <- crossprod(scale(x, center = TRUE, scale = FALSE)) / n
#' init <- diag(1/sqrt(diag(S)))
#' # unweighted estimate
#' L_unweighted <- varband(S, lambda = 0.1, init, w = FALSE)
#' # at most 10-banded unweighted estimate
#' L_unweighted_K10 <- varband(S, lambda = 0.1, init, w = FALSE, K = 10)
#' # weighted estimate
#' L_weighted <- varband(S, lambda = 0.1, init, w = TRUE)
#' # lasso estimate (w has no effect here: rowadmm_lasso takes no weight argument)
#' L_lasso <- varband(S, lambda = 0.1, init, w = TRUE, lasso = TRUE)
#' @seealso \code{\link{varband_path}} \code{\link{varband_cv}}
#'
#' @export
varband <- function(S, lambda, init, K = -1L, w = FALSE, lasso = FALSE) {
    .Call('varband_varband', PACKAGE = 'varband', S, lambda, init, K, w, lasso)
}
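
# A minimal sketch (not the package's internal implementation) of the
# row-by-row decomposition described in the documentation above: row r
# of L enters the objective only through S[1:r, 1:r], so each row can
# be estimated independently. It assumes rowadmm() returns the
# estimated length-r row when given the leading r x r block of S and
# an initial row -- an inference from the wrapper's signature, not
# from the C++ source.
varband_by_rows <- function(S, lambda, init, w = FALSE) {
  p <- nrow(S)
  L <- matrix(0, p, p)
  # The first row is unpenalized and has the closed-form solution
  # argmin_{l > 0} {-2 log(l) + S[1, 1] * l^2} = 1 / sqrt(S[1, 1]).
  L[1, 1] <- 1 / sqrt(S[1, 1])
  for (r in 2:p) {
    L[r, 1:r] <- rowadmm(S[1:r, 1:r, drop = FALSE],
                         init[r, 1:r], lambda, w = w)
  }
  L
}
# Hypothetical usage, reusing the objects from the roxygen example above:
# L_rows <- varband_by_rows(S, lambda = 0.1, init)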