# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' Compound decision method for multivariate linear models
#'
#' The function uses an EM algorithm to solve multivariate linear regression
#' problems \deqn{Y = XB + \epsilon} where both the outcome \eqn{Y} and the
#' features \eqn{X} are multi-dimensional. Users can set distinct residual
#' variance estimates for different outcomes, or a single shared estimate for
#' more robust results. Details can be found in
#' \href{https://github.com/sdzhao/cole}{our paper}.
#'
#' @param y \eqn{n x q} matrix of outcomes for training.
#' @param x \eqn{n x p} matrix of features for training.
#' @param S \eqn{d x L} matrix of support points. If \eqn{L = p + 1},
#' then the first p columns are \eqn{\beta}s and the last
#' column contains the corresponding residual error estimates.
#' If \eqn{L = p}, then each row of S is a vector of
#' \eqn{\beta}s and the argument min_s2 is required.
#' \eqn{d = q x g}, where g is the number of groups of
#' support points. Support points can be estimated by
#' other methods that solve multivariate linear regression,
#' e.g., LASSO from glmnet.
#' @param tol error tolerance for convergence of EM algorithm.
#' Default value of tol is 1e-6.
#' @param maxit maximum number of allowable iterations.
#' Default value of maxit is 1e5.
#' @param min_s2 a positive number giving the minimal variance of the
#' estimated y. min_s2 is required when `S` has
#' `p` columns.
#' @return
#'
#' - `f`: vector with \eqn{g x q} elements describing the mixture of
#' \eqn{\beta}s.
#' - `A`: matrix with dimension \eqn{q x d}. `A` is the likelihood estimated
#' by the EM algorithm and is used in prediction.
#' - `bs`: matrix with dimension \eqn{d x (p + 1)}. `bs` contains the support
#' points used to update the prior distribution `f`. `bs` is equal to `S`
#' when \eqn{L = p + 1} and is used in prediction.
#'
#' @examples
#' \donttest{
#' ## generate data
#' p = 10
#' q = 5
#' n = 50
#' x = matrix(rnorm(n*p,0,10), n, p)
#' beta = matrix(rnorm(p*q,0,10), q, p)
#' e = matrix(rnorm(n*q,0,0.1),n,q)
#' y = x %*% t(beta) + e
#' s2 = matrix(rep(0.1,q), q, 1)
#' ## set minimal variance estimation min_s2 = 0.1
#' output1 = comte(y=y, x=x, S=beta, min_s2=0.1)
#' ## use distinct variance from multivariate linear regression models
#' output2 = comte(y=y, x=x, S=cbind(beta,s2))
#' }
#'
#' @useDynLib cole
#' @export
comte <- function(y, x, S, tol = 1e-6, maxit = 1e5L, min_s2 = NULL, scale = 1, cutoff = 0) {
.Call('_cole_comte', PACKAGE = 'cole', y, x, S, tol, maxit, min_s2, scale, cutoff)
}
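
## The documentation above notes that support points can come from other
## methods that solve multivariate linear regression, e.g., LASSO from glmnet.
## Below is a minimal illustrative sketch of that idea, not part of the
## package API: `make_support_lasso` is a hypothetical helper name and assumes
## the glmnet package is installed. Its q x p output can be passed to comte()
## as S together with min_s2.
make_support_lasso <- function(x, y) {
  ## fit a cross-validated LASSO separately for each outcome and stack the
  ## fitted coefficient vectors as rows (one row of support points per outcome)
  t(sapply(seq_len(ncol(y)), function(j) {
    fit <- glmnet::cv.glmnet(x, y[, j])
    as.numeric(stats::coef(fit, s = "lambda.min"))[-1]  # drop the intercept
  }))
}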
#' Predicting function for compound decision method
#'
#' The function takes the object returned by comte and testing data as input,
#' and returns predictions for the corresponding outcomes.
#'
#' @param comte_obj object returned by the comte function, which contains
#' the information needed for prediction.
#' @param newx \eqn{m x p} matrix of testing data features.
#'
#' @return
#'
#' - `esty`: \eqn{m x q} matrix of estimated y based on newx.
#'
#' @examples
#' \donttest{
#' ## generate data
#' p = 10
#' q = 5
#' n = 50
#' x = matrix(rnorm(n*p,0,10), n, p)
#' beta = matrix(rnorm(p*q,0,10), q, p)
#' e = matrix(rnorm(n*q,0,0.1),n,q)
#' y = x %*% t(beta) + e
#' s2 = matrix(rep(0.1,q), q, 1)
#' ## generate test data
#' x_test = matrix(rnorm(n*p,0,1), n, p)
#' ## set minimal variance estimation min_s2 = 0.1
#' output1 = comte(y=y, x=x, S=beta, min_s2=0.1)
#' esty1 = predict_comte(output1, x_test)
#' ## use distinct variance from multivariate linear regression models
#' output2 = comte(y=y, x=x, S=cbind(beta,s2))
#' esty2 = predict_comte(output2, x_test)
#' }
#'
#' @useDynLib cole
#' @export
predict_comte <- function(comte_obj, newx) {
.Call('_cole_predict_comte', PACKAGE = 'cole', comte_obj, newx)
}
#' Least-Squares Pooled Adjacent Violators Algorithm
#'
#' An implementation of the pooled adjacent violators algorithm for
#' minimizing the least squares loss subject to the monotonicity
#' (non-decreasing) constraint.
#'
#' It is assumed that `y` is ordered by the predictor, so that a monotone
#' non-decreasing fit is appropriate. If a monotone non-increasing fit is
#' desired, use `base::rev()` as in the examples.
#'
#' The basic idea is to start by partitioning the data into singletons (zero
#' least squares loss), then merge neighboring sets whenever the monotone
#' non-decreasing constraint is violated. The value a set takes is the average
#' of its elements; this is implemented by tracking each set's cardinality and
#' current mean, so that merging two sets is just a weighted sum.
#'
#' This algorithm has O(n) time complexity. However, this implementation may
#' have worse complexity because of the way elements are removed from a
#' vector during the merge.
#'
#' @param y numeric vector of observations corresponding to a sorted,
#' increasing predictor
#' @return a numeric vector of fitted values corresponding to y
#' @examples
#' # non-decreasing example
#' set.seed(1)
#' y <- atan(seq(from = -5, to = 5, length.out = 51)) + rnorm(51, sd = 0.2)
#' y.hat <- pava_ls(y)
#' plot(y)
#' points(y.hat, col = "red")
#'
#' # non-increasing example
#' set.seed(1)
#' y <- -atan(seq(from = -5, to = 5, length.out = 51)) + rnorm(51, sd = 0.2)
#' y.hat <- rev(pava_ls(rev(y)))
#' plot(y)
#' points(y.hat, col = "red")
#'
#' @useDynLib cole
#' @importFrom Rcpp evalCpp
#' @export
pava_ls <- function(y) {
.Call('_cole_pava_ls', PACKAGE = 'cole', y)
}
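
## A minimal pure-R sketch of the pooled adjacent violators idea described in
## the documentation above, shown for illustration only: the exported
## pava_ls() calls the C++ implementation, and `pava_ls_r` is a hypothetical
## name, not part of the package API.
pava_ls_r <- function(y) {
  n <- length(y)
  means <- numeric(n)   # running block means
  sizes <- integer(n)   # running block cardinalities
  k <- 0L               # number of active blocks
  for (i in seq_len(n)) {
    ## each observation starts as its own singleton block
    k <- k + 1L
    means[k] <- y[i]
    sizes[k] <- 1L
    ## merge backwards while the non-decreasing constraint is violated;
    ## the merged block's value is the weighted mean of the two blocks
    while (k > 1L && means[k - 1L] > means[k]) {
      w <- sizes[k - 1L] + sizes[k]
      means[k - 1L] <- (sizes[k - 1L] * means[k - 1L] + sizes[k] * means[k]) / w
      sizes[k - 1L] <- w
      k <- k - 1L
    }
  }
  ## expand block means back into a fitted value for each observation
  rep(means[seq_len(k)], times = sizes[seq_len(k)])
}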