# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
l1c_lin <- function(X, z, l2, a = NULL, d = NULL, P = NULL, m = NULL) {
.Call('_gelnet_l1c_lin', PACKAGE = 'gelnet', X, z, l2, a, d, P, m)
}
l1c_blr <- function(X, y, l2, balanced, d = NULL, P = NULL, m = NULL) {
.Call('_gelnet_l1c_blr', PACKAGE = 'gelnet', X, y, l2, balanced, d, P, m)
}
l1c_oclr <- function(X, l2, d = NULL, P = NULL, m = NULL) {
.Call('_gelnet_l1c_oclr', PACKAGE = 'gelnet', X, l2, d, P, m)
}
#' One-class logistic regression objective function
#'
#' Evaluates the one-class objective function value for a given model.
#' See details.
#'
#' Computes the objective function value according to
#' \deqn{ -\frac{1}{n} \sum_i \left[ s_i - \log( 1 + \exp(s_i) ) \right] + R(w) }
#' where
#' \deqn{ s_i = w^T x_i }
#' \deqn{ R(w) = \lambda_1 \sum_j d_j |w_j| + \frac{\lambda_2}{2} (w-m)^T P (w-m) }
#'
#' @param w p-by-1 vector of model weights
#' @param X n-by-p matrix of n samples in p dimensions
#' @param l1 L1-norm penalty scaling factor \eqn{\lambda_1}
#' @param l2 L2-norm penalty scaling factor \eqn{\lambda_2}
#' @param d p-by-1 vector of feature weights
#' @param P p-by-p feature-feature penalty matrix
#' @param m p-by-1 vector of translation coefficients
#' @return The objective function value.
#' @seealso \code{\link{gelnet}}
#' @export
gelnet_oclr_obj <- function(w, X, l1, l2, d = NULL, P = NULL, m = NULL) {
.Call('_gelnet_gelnet_oclr_obj', PACKAGE = 'gelnet', w, X, l1, l2, d, P, m)
}
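# Illustrative sketch (an editor-added assumption, not part of the generated
# bindings): evaluating the one-class objective on simulated data. The data
# dimensions and penalty values are arbitrary; the block is wrapped in
# if (FALSE) so it never executes when this file is sourced with the package.
if (FALSE) {
  set.seed(1)
  X <- matrix(rnorm(20 * 5), 20, 5)          # 20 samples, 5 features
  w <- rnorm(5)                              # candidate weight vector
  gelnet_oclr_obj(w, X, l1 = 0.1, l2 = 1)    # d, P, m default to NULL
}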
#' Linear regression objective function
#'
#' Evaluates the linear regression objective function value for a given model.
#' See details.
#'
#' Computes the objective function value according to
#' \deqn{ \frac{1}{2n} \sum_i a_i (z_i - (w^T x_i + b))^2 + R(w) }
#' where
#' \deqn{ R(w) = \lambda_1 \sum_j d_j |w_j| + \frac{\lambda_2}{2} (w-m)^T P (w-m) }
#'
#' @param w p-by-1 vector of model weights
#' @param b the model bias term
#' @param X n-by-p matrix of n samples in p dimensions
#' @param z n-by-1 response vector
#' @param l1 L1-norm penalty scaling factor \eqn{\lambda_1}
#' @param l2 L2-norm penalty scaling factor \eqn{\lambda_2}
#' @param a n-by-1 vector of sample weights
#' @param d p-by-1 vector of feature weights
#' @param P p-by-p feature-feature penalty matrix
#' @param m p-by-1 vector of translation coefficients
#' @return The objective function value.
#' @export
gelnet_lin_obj <- function(w, b, X, z, l1, l2, a = NULL, d = NULL, P = NULL, m = NULL) {
.Call('_gelnet_gelnet_lin_obj', PACKAGE = 'gelnet', w, b, X, z, l1, l2, a, d, P, m)
}
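# Illustrative sketch (assumption, not an official example): evaluating the
# linear-regression objective for a candidate (w, b) pair on simulated data.
# Wrapped in if (FALSE) so it does not run when the file is sourced.
if (FALSE) {
  set.seed(1)
  X <- matrix(rnorm(50 * 10), 50, 10)
  z <- rnorm(50)
  w <- rnorm(10)
  b <- 0
  gelnet_lin_obj(w, b, X, z, l1 = 0.1, l2 = 1)   # a, d, P, m default to NULL
}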
#' Binary logistic regression objective function
#'
#' Evaluates the logistic regression objective function value for a given model.
#' See details.
#'
#' Computes the objective function value according to
#' \deqn{ -\frac{1}{n} \sum_i \left[ y_i s_i - \log( 1 + \exp(s_i) ) \right] + R(w) }
#' where
#' \deqn{ s_i = w^T x_i + b }
#' \deqn{ R(w) = \lambda_1 \sum_j d_j |w_j| + \frac{\lambda_2}{2} (w-m)^T P (w-m) }
#' When \code{balanced} is TRUE, the loss is averaged over each class separately,
#' rather than over the entire data set. The total loss is then computed as the mean
#' of those per-class averages.
#'
#' @param w p-by-1 vector of model weights
#' @param b the model bias term
#' @param X n-by-p matrix of n samples in p dimensions
#' @param y n-by-1 binary response vector with values in {0,1}
#' @param l1 L1-norm penalty scaling factor \eqn{\lambda_1}
#' @param l2 L2-norm penalty scaling factor \eqn{\lambda_2}
#' @param d p-by-1 vector of feature weights
#' @param P p-by-p feature-feature penalty matrix
#' @param m p-by-1 vector of translation coefficients
#' @param balanced boolean specifying whether the balanced model is being evaluated
#' @return The objective function value.
#' @seealso \code{\link{gelnet}}
#' @export
gelnet_blr_obj <- function(w, b, X, y, l1, l2, balanced = FALSE, d = NULL, P = NULL, m = NULL) {
.Call('_gelnet_gelnet_blr_obj', PACKAGE = 'gelnet', w, b, X, y, l1, l2, balanced, d, P, m)
}
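# Illustrative sketch (assumption): evaluating the logistic-regression
# objective once with the standard loss and once with the class-balanced loss.
# Wrapped in if (FALSE) so it does not run when the file is sourced.
if (FALSE) {
  set.seed(1)
  X <- matrix(rnorm(50 * 10), 50, 10)
  y <- sample(0:1, 50, replace = TRUE)
  w <- rnorm(10); b <- 0
  gelnet_blr_obj(w, b, X, y, l1 = 0.1, l2 = 1)
  gelnet_blr_obj(w, b, X, y, l1 = 0.1, l2 = 1, balanced = TRUE)
}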
#' GELnet optimizer for linear regression
#'
#' Constructs a GELnet model for linear regression using coordinate descent.
#'
#' The method operates through cyclical coordinate descent.
#' The optimization is terminated after the desired tolerance is achieved, or after a maximum number of iterations.
#'
#' @param X n-by-p matrix of n samples in p dimensions
#' @param z n-by-1 vector of response values
#' @param l1 coefficient for the L1-norm penalty
#' @param l2 coefficient for the L2-norm penalty
#' @param a n-by-1 vector of sample weights
#' @param d p-by-1 vector of feature weights
#' @param P p-by-p feature association penalty matrix
#' @param m p-by-1 vector of translation coefficients
#' @param max_iter maximum number of iterations
#' @param eps convergence precision
#' @param w_init initial parameter estimate for the weights
#' @param b_init initial parameter estimate for the bias term
#' @param fix_bias set to TRUE to prevent the bias term from being updated (default: FALSE)
#' @param silent set to TRUE to suppress run-time output; overrides verbose (default: FALSE)
#' @param verbose set to TRUE to see extra output; is overridden by silent (default: FALSE)
#' @param nonneg set to TRUE to enforce non-negativity constraints on the weights (default: FALSE)
#' @return A list with two elements:
#' \describe{
#' \item{w}{p-by-1 vector of p model weights}
#' \item{b}{scalar, bias term for the linear model}
#' }
#' @export
gelnet_lin_opt <- function(X, z, l1, l2, max_iter = 100L, eps = 1e-5, fix_bias = FALSE, silent = FALSE, verbose = FALSE, nonneg = FALSE, w_init = NULL, b_init = NULL, a = NULL, d = NULL, P = NULL, m = NULL) {
.Call('_gelnet_gelnet_lin_opt', PACKAGE = 'gelnet', X, z, l1, l2, max_iter, eps, fix_bias, silent, verbose, nonneg, w_init, b_init, a, d, P, m)
}
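# Illustrative sketch (assumption): fitting a linear GELnet model on simulated
# data and inspecting the returned weights and bias. Values are arbitrary and
# the block is wrapped in if (FALSE) so it never runs on package load.
if (FALSE) {
  set.seed(1)
  X <- matrix(rnorm(50 * 10), 50, 10)
  z <- drop(X %*% rnorm(10)) + rnorm(50)
  fit <- gelnet_lin_opt(X, z, l1 = 0.1, l2 = 1)
  str(fit)                                          # list with w (length 10) and b (scalar)
  gelnet_lin_obj(fit$w, fit$b, X, z, l1 = 0.1, l2 = 1)   # objective at the solution
}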
#' GELnet optimizer for binary logistic regression
#'
#' Constructs a GELnet model for logistic regression using the Newton method.
#'
#' The method operates by constructing iteratively re-weighted least squares approximations
#' of the log-likelihood loss function and then calling the linear regression routine
#' to solve those approximations. The least squares approximations are obtained via the Taylor series
#' expansion about the current parameter estimates.
#'
#' @param X n-by-p matrix of n samples in p dimensions
#' @param y n-by-1 vector of binary response labels (must be in {0,1})
#' @param l1 coefficient for the L1-norm penalty
#' @param l2 coefficient for the L2-norm penalty
#' @param d p-by-1 vector of feature weights
#' @param P p-by-p feature association penalty matrix
#' @param m p-by-1 vector of translation coefficients
#' @param max_iter maximum number of iterations
#' @param eps convergence precision
#' @param w_init initial parameter estimate for the weights
#' @param b_init initial parameter estimate for the bias term
#' @param silent set to TRUE to suppress run-time output to stdout; overrides verbose (default: FALSE)
#' @param verbose set to TRUE to see extra output; is overridden by silent (default: FALSE)
#' @param balanced boolean specifying whether the balanced model is being trained
#' @param nonneg set to TRUE to enforce non-negativity constraints on the weights (default: FALSE)
#' @return A list with two elements:
#' \describe{
#' \item{w}{p-by-1 vector of p model weights}
#' \item{b}{scalar, bias term for the linear model}
#' }
#' @seealso \code{\link{gelnet_lin_opt}}
#' @export
gelnet_blr_opt <- function(X, y, l1, l2, max_iter = 100L, eps = 1e-5, silent = FALSE, verbose = FALSE, balanced = FALSE, nonneg = FALSE, w_init = NULL, b_init = NULL, d = NULL, P = NULL, m = NULL) {
.Call('_gelnet_gelnet_blr_opt', PACKAGE = 'gelnet', X, y, l1, l2, max_iter, eps, silent, verbose, balanced, nonneg, w_init, b_init, d, P, m)
}
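# Illustrative sketch (assumption): training a logistic GELnet model; the
# balanced = TRUE variant averages the loss over each class separately.
# Wrapped in if (FALSE) so it does not run when the file is sourced.
if (FALSE) {
  set.seed(1)
  X <- matrix(rnorm(50 * 10), 50, 10)
  y <- sample(0:1, 50, replace = TRUE)
  fit <- gelnet_blr_opt(X, y, l1 = 0.1, l2 = 1, balanced = TRUE)
  head(fit$w); fit$b
}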
#' GELnet optimizer for one-class logistic regression
#'
#' Constructs a GELnet model for one-class logistic regression using the Newton method.
#'
#' The function optimizes the following objective:
#' \deqn{ -\frac{1}{n} \sum_i \left[ s_i - \log( 1 + \exp(s_i) ) \right] + R(w) }
#' where
#' \deqn{ s_i = w^T x_i }
#' \deqn{ R(w) = \lambda_1 \sum_j d_j |w_j| + \frac{\lambda_2}{2} (w-m)^T P (w-m) }
#' The method operates by constructing iteratively re-weighted least squares approximations
#' of the log-likelihood loss function and then calling the linear regression routine
#' to solve those approximations. The least squares approximations are obtained via the Taylor series
#' expansion about the current parameter estimates.
#'
#' @param X n-by-p matrix of n samples in p dimensions
#' @param l1 coefficient for the L1-norm penalty
#' @param l2 coefficient for the L2-norm penalty
#' @param d p-by-1 vector of feature weights
#' @param P p-by-p feature association penalty matrix
#' @param m p-by-1 vector of translation coefficients
#' @param max_iter maximum number of iterations
#' @param eps convergence precision
#' @param w_init initial parameter estimate for the weights
#' @param silent set to TRUE to suppress run-time output to stdout; overrides verbose (default: FALSE)
#' @param verbose set to TRUE to see extra output; is overridden by silent (default: FALSE)
#' @param nonneg set to TRUE to enforce non-negativity constraints on the weights (default: FALSE)
#' @return A list with one element:
#' \describe{
#' \item{w}{p-by-1 vector of p model weights}
#' }
#' @export
gelnet_oclr_opt <- function(X, l1, l2, max_iter = 100L, eps = 1e-5, silent = FALSE, verbose = FALSE, nonneg = FALSE, w_init = NULL, d = NULL, P = NULL, m = NULL) {
.Call('_gelnet_gelnet_oclr_opt', PACKAGE = 'gelnet', X, l1, l2, max_iter, eps, silent, verbose, nonneg, w_init, d, P, m)
}
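# Illustrative sketch (assumption): training a one-class model and scoring the
# training samples with the resulting weight vector. Wrapped in if (FALSE) so
# it does not run when the file is sourced.
if (FALSE) {
  set.seed(1)
  X <- matrix(rnorm(30 * 8), 30, 8)
  fit <- gelnet_oclr_opt(X, l1 = 0.05, l2 = 1)
  s <- drop(X %*% fit$w)        # decision values s_i = w^T x_i
  summary(1 / (1 + exp(-s)))    # corresponding one-class probabilities
}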