R/RcppExports.R

# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

#' BIC prior
#'
#' @param n The sample size, an integer
#' @param p_gamma The number of covariates in the model gamma
#' @param R2 The R-squared value of the model gamma, a number between 0 and 1
#' @return The log of the Bayes Factor
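#' @examples
#' # Minimal illustrative call (placeholder values, not from a real data set):
#' # the log Bayes factor under the BIC/cake prior for a model with 3 covariates
#' # and R-squared 0.5 fitted to n = 100 observations.
#' BIC(n = 100, p_gamma = 3, R2 = 0.5)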
#' @export
BIC <- function(n, p_gamma, R2) {
    .Call('_blma_BIC', PACKAGE = 'blma', n, p_gamma, R2)
}

#' ZE prior
#'
#' @param n The sample size, an integer
#' @param p_gamma The number of covariates in the model gamma
#' @param R2 The R-squared value of the model gamma, a number between 0 and 1
#' @return The log of the Bayes Factor
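#' @examples
#' # Illustrative call with placeholder values; the BIC prior is evaluated on the
#' # same model summary statistics for comparison.
#' ZE(n = 100, p_gamma = 3, R2 = 0.5)
#' BIC(n = 100, p_gamma = 3, R2 = 0.5)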
#' @export
ZE <- function(n, p_gamma, R2) {
    .Call('_blma_ZE', PACKAGE = 'blma', n, p_gamma, R2)
}

#' log_hyperg_2F1
#'
#' The logarithm of the Gaussian hypergeometric function \eqn{{}_2F_1},
#' as used in evaluating the mixture \eqn{g}-prior Bayes factors.
#'
#' @param b The parameter b of the hypergeometric function
#' @param c The parameter c of the hypergeometric function
#' @param x The argument x, a number
#' @return The logarithm of the Gaussian hypergeometric function evaluated with parameters b and c at x
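#' @examples
#' # A minimal sketch; the argument values below are illustrative only, and the
#' # valid parameter range depends on the underlying 2F1 evaluation.
#' log_hyperg_2F1(b = 2, c = 3, x = 0.5)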
#' @export
log_hyperg_2F1 <- function(b, c, x) {
    .Call('_blma_log_hyperg_2F1', PACKAGE = 'blma', b, c, x)
}

#' log_hyperg_2F1_naive
#'
#' A direct (naive) evaluation of the logarithm of the Gaussian hypergeometric
#' function \eqn{{}_2F_1}.
#'
#' @param b The parameter b of the hypergeometric function
#' @param c The parameter c of the hypergeometric function
#' @param x The argument x, a number
#' @return The logarithm of the Gaussian hypergeometric function evaluated with parameters b and c at x
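#' @examples
#' # Sketch only: compare the naive evaluation with log_hyperg_2F1 at an
#' # illustrative point (values chosen for illustration, not from the package).
#' log_hyperg_2F1_naive(b = 2, c = 3, x = 0.5)
#' log_hyperg_2F1(b = 2, c = 3, x = 0.5)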
#' @export
log_hyperg_2F1_naive <- function(b, c, x) {
    .Call('_blma_log_hyperg_2F1_naive', PACKAGE = 'blma', b, c, x)
}

#' Liang's hyper g-prior
#'
#' @param n The sample size, an integer
#' @param p_gamma The number of covariates in the model gamma
#' @param R2 The R-squared value of the model gamma, a number between 0 and 1
#' @return The log of the Bayes Factor
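#' @examples
#' # Illustrative values only. liang_g1 and liang_g2 evaluate the same Bayes
#' # factor via Equations (10) and (11) of Greenaway and Ormerod (2018), so the
#' # two results should agree closely when liang_g1 is numerically stable.
#' liang_g1(n = 100, p_gamma = 3, R2 = 0.5)
#' liang_g2(n = 100, p_gamma = 3, R2 = 0.5)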
#' @export
liang_g1 <- function(n, p_gamma, R2) {
    .Call('_blma_liang_g1', PACKAGE = 'blma', n, p_gamma, R2)
}

#' Liang's g prior
#'
#' @param n The sample size, an integer
#' @param p_gamma The number of covariates in the model gamma
#' @param R2 The R-squared value of the model gamma, a number between 0 and 1
#' @return The log of the Bayes Factor
#' @export
liang_g2 <- function(n, p_gamma, R2) {
    .Call('_blma_liang_g2', PACKAGE = 'blma', n, p_gamma, R2)
}

#' Liang's g/n prior Appell
#'
#' @param n The sample size, an integer
#' @param p_gamma The number of covariates in the model gamma
#' @param R2 The R-squared value of the model gamma, a number between 0 and 1
#' @return The log of the Bayes Factor
#' @export
liang_g_n_appell <- function(n, p_gamma, R2) {
    .Call('_blma_liang_g_n_appell', PACKAGE = 'blma', n, p_gamma, R2)
}

#' Liang's g/n prior quadrature
#'
#' @param n The sample size, an integer
#' @param p_gamma The number of covariates in the model gamma
#' @param R2 The R-squared value of the model gamma, a number between 0 and 1
#' @return The log of the Bayes Factor
#' @export
liang_g_n_quad <- function(n, p_gamma, R2) {
    .Call('_blma_liang_g_n_quad', PACKAGE = 'blma', n, p_gamma, R2)
}

#' Liang's g/n prior approximation
#'
#' @param n The sample size, an integer
#' @param p_gamma The number of covariates in the model gamma
#' @param R2 The R-squared value of the model gamma, a number between 0 and 1
#' @return The log of the Bayes Factor
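#' @examples
#' # Illustrative values only: the approximation should be close to the
#' # quadrature evaluation of the same g/n-prior Bayes factor.
#' liang_g_n_approx(n = 100, p_gamma = 3, R2 = 0.5)
#' liang_g_n_quad(n = 100, p_gamma = 3, R2 = 0.5)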
#' @export
liang_g_n_approx <- function(n, p_gamma, R2) {
    .Call('_blma_liang_g_n_approx', PACKAGE = 'blma', n, p_gamma, R2)
}

#' Robust Bayarri 1
#'
#' @param n The sample size, an integer
#' @param p_gamma The number of covariates in the model gamma
#' @param R2 The R-squared value of the model gamma, a number between 0 and 1
#' @return The log of the Bayes Factor
#' @export
robust_bayarri1 <- function(n, p_gamma, R2) {
    .Call('_blma_robust_bayarri1', PACKAGE = 'blma', n, p_gamma, R2)
}

#' Robust Bayarri 2
#'
#' @param n The sample size, an integer
#' @param p_gamma The number of covariates in the model gamma
#' @param R2 The R-squared value of the model gamma, a number between 0 and 1
#' @return The log of the Bayes Factor
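#' @examples
#' # Illustrative values only: Equations (18) and (19) of Greenaway and Ormerod
#' # (2018) evaluate the same robust-prior Bayes factor, so the two calls should
#' # agree closely.
#' robust_bayarri1(n = 100, p_gamma = 3, R2 = 0.5)
#' robust_bayarri2(n = 100, p_gamma = 3, R2 = 0.5)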
#' @export
robust_bayarri2 <- function(n, p_gamma, R2) {
    .Call('_blma_robust_bayarri2', PACKAGE = 'blma', n, p_gamma, R2)
}

#' log_BF_g_on_n_integrand
#'
#' @param vu The argument, vu
#' @param n The sample size, an integer
#' @param p_gamma The number of covariates in the model gamma
#' @param R2 The R-squared value of the model gamma, a number between 0 and 1
#' @return The log of the Bayes Factor
#' @export
log_BF_g_on_n_integrand <- function(vu, n, p_gamma, R2) {
    .Call('_blma_log_BF_g_on_n_integrand', PACKAGE = 'blma', vu, n, p_gamma, R2)
}

#' hyper-g/n Gauss-Legendre quadrature
#'
#' @param n The sample size, an integer
#' @param p_gamma The number of covariates in the model gamma
#' @param R2 The R-squared value of the model gamma, a number between 0 and 1
#' @return The log of the Bayes Factor
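#' @examples
#' # Illustrative values only: the quadrature result, alongside the integrand
#' # evaluated at a single (arbitrary) point.
#' log_BF_g_on_n_quad(n = 100, p_gamma = 3, R2 = 0.5)
#' log_BF_g_on_n_integrand(vu = 0.5, n = 100, p_gamma = 3, R2 = 0.5)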
#' @export
log_BF_g_on_n_quad <- function(n, p_gamma, R2) {
    .Call('_blma_log_BF_g_on_n_quad', PACKAGE = 'blma', n, p_gamma, R2)
}

#' log_BF_Zellner_Siow_integrand
#'
#' @param x The argument, x
#' @param n The sample size, an integer
#' @param p_gamma The number of covariates in the model gamma
#' @param R2 The R-squared value of the model gamma, a number between 0 and 1
#' @return The log of the Bayes Factor
#' @export
log_BF_Zellner_Siow_integrand <- function(x, n, p_gamma, R2) {
    .Call('_blma_log_BF_Zellner_Siow_integrand', PACKAGE = 'blma', x, n, p_gamma, R2)
}

#' Zellner-Siow Gauss-Laguerre quadrature
#'
#' @param n The sample size, an integer
#' @param p_gamma The number of covariates in the model gamma
#' @param R2 The R-squared value of the model gamma, a number between 0 and 1
#' @return The log of the Bayes Factor
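#' @examples
#' # Illustrative values only: the Zellner-Siow log Bayes factor for a model
#' # with 3 covariates, R-squared 0.5 and n = 100 observations.
#' log_BF_Zellner_Siow_quad(n = 100, p_gamma = 3, R2 = 0.5)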
#' @export
log_BF_Zellner_Siow_quad <- function(n, p_gamma, R2) {
    .Call('_blma_log_BF_Zellner_Siow_quad', PACKAGE = 'blma', n, p_gamma, R2)
}

#' Run a Collapsed Variational Approximation to find the K best linear models
#'
#' @param vy_in Vector of responses
#' @param mX_in The matrix of covariates which may or may not be included in each model
#' @param mGamma_in Matrix of initial models, a K by p logical matrix
#' @param prior -- the choice of mixture \eqn{g}-prior used to perform Bayesian model averaging. The choices
#' available include:
#'   \itemize{
#'     \item{"BIC"}{-- the Bayesian information criterion obtained by using the cake prior
#'     of Ormerod et al. (2017).}
#'
#'     \item{"ZE"}{-- special case of the prior structure described by Maruyama and George (2011).}
#'
#'     \item{"liang_g1"}{-- the mixture \eqn{g}-prior of Liang et al. (2008) with prior hyperparameter
#'     \eqn{a=3} evaluated directly using Equation (10) of Greenaway and Ormerod (2018) where the Gaussian
#'     hypergeometric function is evaluated using the {gsl} library. Note: this option can lead to numerical problems and is only
#'     meant to be used for comparative purposes.}
#'
#'     \item{"liang_g2"}{-- the mixture \eqn{g}-prior of Liang et al. (2008) with prior hyperparameter
#'      \eqn{a=3} evaluated directly using Equation (11)  of Greenaway and Ormerod (2018).}
#'
#'     \item{"liang_g_n_appell"}{-- the mixture \eqn{g/n}-prior of Liang et al. (2008) with prior
#'      hyperparameter \eqn{a=3} evaluated using the {appell R} package.}
#'
#'     \item{"liang_g_n_approx"}{-- the mixture \eqn{g/n}-prior of Liang et al. (2008) with prior hyperparameter
#'      \eqn{a=3} using the approximation in Equation (15) of Greenaway and Ormerod (2018) for models with more
#'       than two covariates and numerical quadrature (see below) for models with one or two covariates.}
#'
#'     \item{"liang_g_n_quad"}{-- the mixture \eqn{g/n}-prior of Liang et al. (2008) with prior hyperparameter
#'      \eqn{a=3} evaluated using a composite trapezoid rule.}
#'
#'     \item{"robust_bayarri1"}{-- the robust prior of Bayarri et al. (2012) using default prior hyper
#'     parameter choices evaluated directly using Equation (18)  of Greenaway and Ormerod (2018) with the
#'     {gsl} library.}
#'
#'     \item{"robust_bayarri2"}{-- the robust prior of Bayarri et al. (2012) using default prior hyper
#'     parameter choices evaluated directly using Equation (19) of Greenaway and Ormerod (2018).}
#' }
#' @param modelprior The model prior to use. The choices of model prior are "uniform", "beta-binomial" or
#' "bernoulli". The choice of model prior dictates the meaning of the parameter modelpriorvec.
#' @param modelpriorvec_in If modelprior is "uniform", then modelpriorvec_in is ignored and can be NULL.
#'
#' If modelprior is "beta-binomial" then modelpriorvec_in should be of length 2, with the first element
#' containing the alpha hyperparameter for the beta prior and the second element containing the beta
#' hyperparameter for the beta prior.
#'
#' If modelprior is "bernoulli", then modelpriorvec must be of the same length as the number
#' of columns in mX. Each element i of modelpriorvec contains the prior probability of the the ith covariate
#' being included in the model.
#' @param bUnique Whether to ensure uniqueness in the population of particles or not. Defaults to true.
#' @param lambda The weighting factor for the entropy in f_lambda. Defaults to 1.
#' @param cores The number of cores to use. Defaults to 1.
#' @return The object returned is a list containing:
#' \itemize{
#'   \item{"mGamma"}{-- A K by p binary matrix containing the final population of models}
#'
#'   \item{"vgamma.hat"}{-- The most probable model found by pva}
#'
#'   \item{"vlogBF"}{-- The null-based Bayes factor for each model in the population}
#'
#'   \item{"posterior_model_probabilities"}{-- The estimated posterior model parameters for each model in
#'   the population.}
#'
#'   \item{"posterior_inclusion_probabilities"}{-- The estimated variable inclusion probabilities for each
#'   model in the population.}
#'
#'   \item{"vR2"}{-- The fitted R-squared values for each model in the population.}
#'
#'   \item{"vp"}{-- The model size for each model in the population.}
#' }
#' @examples
#' mD <- MASS::UScrime
#' notlog <- c(2,ncol(MASS::UScrime))
#' mD[,-notlog] <- log(mD[,-notlog])
#'
#' for (j in 1:ncol(mD)) {
#'   mD[,j] <- (mD[,j] - mean(mD[,j]))/sd(mD[,j])
#' }
#'
#' varnames <- c(
#'   "log(AGE)",
#'   "S",
#'   "log(ED)",
#'   "log(Ex0)",
#'   "log(Ex1)",
#'   "log(LF)",
#'   "log(M)",
#'   "log(N)",
#'   "log(NW)",
#'   "log(U1)",
#'   "log(U2)",
#'   "log(W)",
#'   "log(X)",
#'   "log(prison)",
#'   "log(time)")
#'
#' y.t <- mD$y
#' X.f <- data.matrix(cbind(mD[1:15]))
#' colnames(X.f) <- varnames
#' K <- 100
#' p <- ncol(X.f)
#' initial_gamma <- matrix(rbinom(K * p, 1, .5), K, p)
#' pva_result <- pva(y.t, X.f, initial_gamma, prior = "BIC", modelprior = "uniform",
#'                   modelpriorvec_in=NULL)
#' @references
#' Bayarri, M. J., Berger, J. O., Forte, A., Garcia-Donato, G., 2012. Criteria for Bayesian
#' model choice with application to variable selection. Annals of Statistics 40 (3), 1550-
#' 1577.
#'
#' Greenaway, M. J., J. T. Ormerod (2018) Numerical aspects of Bayesian linear models averaging using mixture
#' g-priors.
#'
#' Liang, F., Paulo, R., Molina, G., Clyde, M. a., Berger, J. O., 2008. Mixtures of g priors for
#' Bayesian variable selection. Journal of the American Statistical Association 103 (481),
#' 410-423.
#'
#' Ormerod, J. T., Stewart, M., Yu, W., Romanes, S. E., 2017. Bayesian hypothesis tests
#' with diffuse priors: Can we have our cake and eat it too?
#' @export
pva <- function(vy_in, mX_in, mGamma_in, prior, modelprior, modelpriorvec_in = NULL, bUnique = TRUE, lambda = 1., cores = 1L) {
    .Call('_blma_pva', PACKAGE = 'blma', vy_in, mX_in, mGamma_in, prior, modelprior, modelpriorvec_in, bUnique, lambda, cores)
}

#' Perform Bayesian Linear Model Averaging over all of the possible linear models where 
#' vy is the response and the covariates are in mX.
#'
#' @importFrom Rcpp evalCpp
#' @useDynLib blma
#'
#' @usage blma(vy, mX, prior, modelprior = "uniform", modelpriorvec = NULL, cores = 1L)
#' @param vy Vector of responses
#' @param mX Covariate matrix
#' @param prior -- the choice of mixture \eqn{g}-prior used to perform Bayesian model
#' averaging. The choices available include:
#'   \itemize{
#'     \item{"BIC"}{-- the Bayesian information criterion obtained by using the cake
#'     prior of Ormerod et al. (2017).}
#'
#'     \item{"ZE"}{-- special case of the prior structure described by BIC and
#'	George (2011).}
#'
#'     \item{"liang_g1"}{-- the mixture \eqn{g}-prior of Liang et al. (2008) with prior 
#'	hyperparameter \eqn{a=3} evaluated directly using Equation (10) of Greenaway and 
#'	Ormerod (2018) where the Gaussian hypergeometric function is evaluated using the 
#'	{gsl} library. Note: this option can lead to numerical problems and is only
#'     meant to be used for comparative purposes.}
#'
#'     \item{"liang_g2"}{-- the mixture \eqn{g}-prior of Liang et al. (2008) with prior 
#'	hyperparameter \eqn{a=3} evaluated directly using Equation (11) of Greenaway and 
#'	Ormerod (2018).}
#'
#'     \item{"liang_g_n_appell"}{-- the mixture \eqn{g/n}-prior of Liang et al. (2008)
#'	with prior hyperparameter \eqn{a=3} evaluated using the {appell R} package.}
#'
#'     \item{"liang_g_approx"}{-- the mixture \eqn{g/n}-prior of Liang et al. (2008)
#'	with prior hyperparameter eqn{a=3} using the approximation Equation (15)  of
#'	Greenaway and Ormerod (2018) for model with more than two covariates and
#'	numerical quadrature (see below) for models with one or two covariates.}
#'
#'     \item{"liang_g_n_quad"}{-- the mixture \eqn{g/n}-prior of Liang et al. (2008)
#'	with prior hyperparameter \eqn{a=3} evaluated using a composite trapezoid rule.}
#'
#'     \item{"robust_bayarri1"}{-- the robust prior of Bayarri et al. (2012) using
#'	default prior hyperparameter choices evaluated directly using Equation (18) of Greenaway
#'	and Ormerod (2018) with the {gsl} library.}
#'
#'     \item{"robust_bayarri2"}{-- the robust prior of Bayarri et al. (2012) using 
#'	default prior hyper choices evaluated directly using Equation (19) of Greenaway
#'	and Ormerod (2018).}
#'     \item{"zellner_siow_gauss_laguerre"}{-- the Zellner-Siow prior calculated
#' using Gauss-Laguerre quadrature with 1000 quadrature points}
#' }
#' @param modelprior The model prior to use. The choices of model prior are "uniform",
#' "beta-binomial" or "bernoulli". The choice of model prior dictates the meaning of the
#' parameter modelpriorvec.
#' @param modelpriorvec If modelprior is "uniform", then the modelpriorvec is ignored
#' and can be NULL.
#'
#' If modelprior is "beta-binomial" then modelpriorvec should be length 2 with the first
#' element containing alpha hyperparameter for the beta prior and the second element
#' containing the beta hyperparameter for beta prior.
#'
#' If modelprior is "bernoulli", then modelpriorvec must be of the same length as the
#' number columns in mX. Each element i of modelpriorvec contains the prior probability 
#' of the the ith covariate being included in the model.
#' @param cores The number of cores to use. Defaults to 1
#' @return A list containing
#' \describe{
#' \item{vR2}{the vector of R-squared values for each model}
#' \item{vp_gamma}{the vector of number of covariates for each model}
#' \item{vlogBF}{the vector of logs of the Bayes Factors of each model}
#' \item{vinclusion_prob}{the vector of inclusion probabilities for each of the
#' covariates}
#' }
#' @examples
#' mD <- MASS::UScrime
#' notlog <- c(2,ncol(MASS::UScrime))
#' mD[,-notlog] <- log(mD[,-notlog])
#'
#' for (j in 1:ncol(mD)) {
#'   mD[,j] <- (mD[,j] - mean(mD[,j]))/sd(mD[,j])
#' }
#'
#' varnames <- c(
#'   "log(AGE)",
#'   "S",
#'   "log(ED)",
#'   "log(Ex0)",
#'   "log(Ex1)",
#'   "log(LF)",
#'   "log(M)",
#'   "log(N)",
#'   "log(NW)",
#'   "log(U1)",
#'   "log(U2)",
#'   "log(W)",
#'   "log(X)",
#'   "log(prison)",
#'   "log(time)")
#'
#' vy <- mD$y
#' mX <- data.matrix(cbind(mD[1:15]))
#' colnames(mX) <- varnames
#' blma_result <- blma(vy, mX, "BIC")
#' @references
#' Bayarri, M. J., Berger, J. O., Forte, A., Garcia-Donato, G., 2012. Criteria for
#' Bayesian model choice with application to variable selection. Annals of Statistics
#' 40 (3), 1550-1577.
#'
#' Greenaway, M. J., J. T. Ormerod (2018) Numerical aspects of Bayesian linear models
#' averaging using mixture g-priors.
#'
#' Liang, F., Paulo, R., Molina, G., Clyde, M. a., Berger, J. O., 2008. Mixtures of g
#' priors for Bayesian variable selection. Journal of the American Statistical
#' Association 103 (481), 410-423.
#'
#' Ormerod, J. T., Stewart, M., Yu, W., Romanes, S. E., 2017. Bayesian hypothesis tests
#' with diffuse priors: Can we have our cake and eat it too?
#' @export
blma <- function(vy, mX, prior, modelprior = "uniform", modelpriorvec = NULL, cores = 1L) {
    .Call('_blma_blma', PACKAGE = 'blma', vy, mX, prior, modelprior, modelpriorvec, cores)
}

#' Perform Bayesian Linear Model Averaging over all of the possible linear models where
#' vy is the response, covariates that may be included are in mZ and covariates which
#' are always included are in mX.
#'
#' @usage blma_fixed(vy, mX, mZ, prior, modelprior = "uniform", modelpriorvec = NULL, cores = 1L)
#' @param vy The vector of responses
#' @param mX The matrix of fixed covariates which will be included in every model
#' @param mZ The matrix of varying covariates, which may or may not be included in each 
#' model
#' @param prior -- the choice of mixture \eqn{g}-prior used to perform Bayesian model
#' averaging. The choices available include:
#'   \itemize{
#'     \item{"BIC"}{-- the Bayesian information criterion obtained by using the cake
#'     prior of Ormerod et al. (2017).}
#'
#'     \item{"ZE"}{-- special case of the prior structure described by BIC and
#'	George (2011).}
#'
#'     \item{"liang_g1"}{-- the mixture \eqn{g}-prior of Liang et al. (2008) with prior 
#'	hyperparameter \eqn{a=3} evaluated directly using Equation (10) of Greenaway and 
#'	Ormerod (2018) where the Gaussian hypergeometric function is evaluated using the 
#'	{gsl} library. Note: this option can lead to numerical problems and is only
#'     meant to be used for comparative purposes.}
#'
#'     \item{"liang_g2"}{-- the mixture \eqn{g}-prior of Liang et al. (2008) with prior 
#'	hyperparameter \eqn{a=3} evaluated directly using Equation (11) of Greenaway and 
#'	Ormerod (2018).}
#'
#'     \item{"liang_g_n_appell"}{-- the mixture \eqn{g/n}-prior of Liang et al. (2008)
#'	with prior hyperparameter \eqn{a=3} evaluated using the {appell R} package.}
#'
#'     \item{"liang_g_approx"}{-- the mixture \eqn{g/n}-prior of Liang et al. (2008)
#'	with prior hyperparameter eqn{a=3} using the approximation Equation (15)  of
#'	Greenaway and Ormerod (2018) for model with more than two covariates and
#'	numerical quadrature (see below) for models with one or two covariates.}
#'
#'     \item{"liang_g_n_quad"}{-- the mixture \eqn{g/n}-prior of Liang et al. (2008)
#'	with prior hyperparameter \eqn{a=3} evaluated using a composite trapezoid rule.}
#'
#'     \item{"robust_bayarri1"}{-- the robust prior of Bayarri et al. (2012) using
#'	default prior hyperparameter choices evaluated directly using Equation (18) of
#' 	Greenaway and Ormerod (2018) with the {gsl} library.}
#'
#'     \item{"robust_bayarri2"}{-- the robust prior of Bayarri et al. (2012) using 
#'	default prior hyperparameter choices evaluated directly using Equation (19) of
#'	Greenaway and Ormerod (2018).}
#' }
#' @param modelprior The model prior to use. The choices of model prior are "uniform",
#' "beta-binomial" or "bernoulli". The choice of model prior dictates the meaning of the
#' parameter modelpriorvec.
#' @param modelpriorvec If modelprior is "uniform", then the modelpriorvec is ignored
#' and can be NULL.
#'
#' If modelprior is "beta-binomial" then modelpriorvec should be length 2 with the first
#' element containing alpha hyperparameter for the beta prior and the second element
#' containing the beta hyperparameter for beta prior.
#'
#' If modelprior is "bernoulli", then modelpriorvec must be of the same length as the
#' number columns in mX. Each element i of modelpriorvec contains the prior probability 
#' of the the ith covariate being included in the model.
#' @param cores The number of cores to use. Defaults to 1
#'
#' @return A list containing
#' \describe{
#' \item{vR2}{the vector of R-squared values for each model}
#' \item{vp_gamma}{the vector of number of covariates for each model}
#' \item{vlogBF}{the vector of logs of the Bayes Factors of each model}
#' \item{vinclusion_prob}{the vector of inclusion probabilities for each of the
#'       covariates}
#' }
#' @examples
#' mD <- MASS::UScrime
#' notlog <- c(2,ncol(MASS::UScrime))
#' mD[,-notlog] <- log(mD[,-notlog])
#'
#' for (j in 1:ncol(mD)) {
#'   mD[,j] <- (mD[,j] - mean(mD[,j]))/sd(mD[,j])
#' }
#'
#' varnames <- c(
#'   "log(AGE)",
#'   "S",
#'   "log(ED)",
#'   "log(Ex0)",
#'   "log(Ex1)",
#'   "log(LF)",
#'   "log(M)",
#'   "log(N)",
#'   "log(NW)",
#'   "log(U1)",
#'   "log(U2)",
#'   "log(W)",
#'   "log(X)",
#'   "log(prison)",
#'   "log(time)")
#'
#' vy <- mD$y
#' mX <- data.matrix(cbind(mD[, 1:10]))
#' colnames(mX) <- varnames[1:10]
#' mZ <- data.matrix(cbind(mD[, 11:15]))
#' blma_result <- blma_fixed(vy, mX, mZ, "BIC")
#' @references
#' Bayarri, M. J., Berger, J. O., Forte, A., Garcia-Donato, G., 2012. Criteria for
#' Bayesian model choice with application to variable selection. Annals of Statistics
#' 40 (3), 1550-1577.
#'
#' Greenaway, M. J., J. T. Ormerod (2018) Numerical aspects of Bayesian linear models
#' averaging using mixture g-priors.
#'
#' Liang, F., Paulo, R., Molina, G., Clyde, M. a., Berger, J. O., 2008. Mixtures of g
#' priors for Bayesian variable selection. Journal of the American Statistical
#' Association 103 (481), 410-423.
#'
#' Ormerod, J. T., Stewart, M., Yu, W., Romanes, S. E., 2017. Bayesian hypothesis tests
#' with diffuse priors: Can we have our cake and eat it too?
#' @export
blma_fixed <- function(vy, mX, mZ, prior, modelprior = "uniform", modelpriorvec = NULL, cores = 1L) {
    .Call('_blma_blma_fixed', PACKAGE = 'blma', vy, mX, mZ, prior, modelprior, modelpriorvec, cores)
}

#' Return the graycode matrix
#'
#' @usage graycode(varying, fixed = 0L)
#' @param varying The number of covariates varying in the graycode matrix
#' @param fixed The number of fixed covariates in the graycode matrix. These
#' covariates will always be included
#' @return The graycode matrix. The fixed covariates appear in the lower-indexed
#' columns as 1s, while the higher-indexed columns vary depending on whether each
#' covariate in the varying set of covariates is included or not.
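#' @examples
#' # Enumerate the models over three varying covariates; each row of the result
#' # is one model, with a 1 indicating that the corresponding covariate is
#' # included.
#' m <- graycode(3)
#' dim(m)
#' head(m)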
#' @export
graycode <- function(varying, fixed = 0L) {
    .Call('_blma_graycode', PACKAGE = 'blma', varying, fixed)
}

#' MCMC sampler over the space of linear models
#'
#' @usage sampler(iterations, vy_in, mX_in, prior, modelprior,
#' modelpriorvec_in = NULL, cores = 1L)
#' @param iterations The number of iterations to run the MCMC sampler for
#' @param vy_in Vector of responses
#' @param mX_in The matrix of covariates which may or may not be included in each model
#' @param prior -- the choice of mixture \eqn{g}-prior used to perform Bayesian model
#' averaging. The choices available include:
#'   \itemize{
#'     \item{"BIC"}{-- the Bayesian information criterion obtained by using the cake
#'     prior of Ormerod et al. (2017).}
#'
#'     \item{"ZE"}{-- special case of the prior structure described by Maruyama and
#'	George (2011).}
#'
#'     \item{"liang_g1"}{-- the mixture \eqn{g}-prior of Liang et al. (2008) with prior 
#'	hyperparameter \eqn{a=3} evaluated directly using Equation (10) of Greenaway and 
#'	Ormerod (2018) where the Gaussian hypergeometric function is evaluated using the 
#'	{gsl} library. Note: this option can lead to numerical problems and is only
#'     meant to be used for comparative purposes.}
#'
#'     \item{"liang_g2"}{-- the mixture \eqn{g}-prior of Liang et al. (2008) with prior 
#'	hyperparameter \eqn{a=3} evaluated directly using Equation (11) of Greenaway and 
#'	Ormerod (2018).}
#'
#'     \item{"liang_g_n_appell"}{-- the mixture \eqn{g/n}-prior of Liang et al. (2008)
#'	with prior hyperparameter \eqn{a=3} evaluated using the {appell R} package.}
#'
#'     \item{"liang_g_approx"}{-- the mixture \eqn{g/n}-prior of Liang et al. (2008)
#'	with prior hyperparameter eqn{a=3} using the approximation Equation (15)  of
#'	Greenaway and Ormerod (2018) for model with more than two covariates and
#'	numerical quadrature (see below) for models with one or two covariates.}
#'
#'     \item{"liang_g_n_quad"}{-- the mixture \eqn{g/n}-prior of Liang et al. (2008)
#'	with prior hyperparameter \eqn{a=3} evaluated using a composite trapezoid rule.}
#'
#'     \item{"robust_bayarri1"}{-- the robust prior of Bayarri et al. (2012) using
#'	default prior hyperparameter choices evaluated directly using Equation (18) of Greenaway
#'	and Ormerod (2018) with the {gsl} library.}
#'
#'     \item{"robust_bayarri2"}{-- the robust prior of Bayarri et al. (2012) using 
#'	default prior hyperparameter choices evaluated directly using Equation (19) of Greenaway
#'	and Ormerod (2018).}
#' }
#' @param modelprior The model prior to use. The choices of model prior are "uniform",
#' "beta-binomial" or "bernoulli". The choice of model prior dictates the meaning of the
#' parameter modelpriorvec.
#' @param modelpriorvec_in If modelprior is "uniform", then modelpriorvec_in is ignored
#' and can be NULL.
#'
#' If modelprior is "beta-binomial" then modelpriorvec should be length 2 with the first
#' element containing alpha hyperparameter for the beta prior and the second element
#' containing the beta hyperparameter for beta prior.
#'
#' If modelprior is "bernoulli", then modelpriorvec must be of the same length as the
#' number columns in mX. Each element i of modelpriorvec contains the prior probability 
#' of the the ith covariate being included in the model.
#' @param cores The number of cores to use. Defaults to 1.
#' @return The object returned is a list containing:
#' \itemize{
#'   \item{"mGamma"}{-- An iterations by p binary matrix containing the sampled models.}
#'   \item{"vinclusion_prob"}{-- The vector of inclusion probabilities.}
#'   \item{"vlogBF"}{-- The vector of logs of the Bayes Factors of the models in mGamma.}
#' }
#' @examples
#' mD <- MASS::UScrime
#' notlog <- c(2,ncol(MASS::UScrime))
#' mD[,-notlog] <- log(mD[,-notlog])
#'
#' for (j in 1:ncol(mD)) {
#'   mD[,j] <- (mD[,j] - mean(mD[,j]))/sd(mD[,j])
#' }
#'
#' varnames <- c(
#'   "log(AGE)",
#'   "S",
#'   "log(ED)",
#'   "log(Ex0)",
#'   "log(Ex1)",
#'   "log(LF)",
#'   "log(M)",
#'   "log(N)",
#'   "log(NW)",
#'   "log(U1)",
#'   "log(U2)",
#'   "log(W)",
#'   "log(X)",
#'   "log(prison)",
#'   "log(time)")
#'
#' vy <- mD$y
#' mX <- data.matrix(cbind(mD[1:15]))
#' colnames(mX) <- varnames
#' sampler_result <- sampler(10000, vy, mX, prior = "BIC",
#'                           modelprior = "uniform",
#'                           modelpriorvec_in = NULL)
#'
#' @references
#' Bayarri, M. J., Berger, J. O., Forte, A., Garcia-Donato, G., 2012. Criteria for
#' Bayesian model choice with application to variable selection. Annals of Statistics
#' 40 (3), 1550-1577.
#'
#' Greenaway, M. J., J. T. Ormerod (2018) Numerical aspects of Bayesian linear models
#' averaging using mixture g-priors.
#'
#' Liang, F., Paulo, R., Molina, G., Clyde, M. a., Berger, J. O., 2008. Mixtures of g
#' priors for Bayesian variable selection. Journal of the American Statistical
#' Association 103 (481), 410-423.
#'
#' Ormerod, J. T., Stewart, M., Yu, W., Romanes, S. E., 2017. Bayesian hypothesis tests
#' with diffuse priors: Can we have our cake and eat it too?
#' @export
sampler <- function(iterations, vy_in, mX_in, prior, modelprior, modelpriorvec_in = NULL, cores = 1L) {
    .Call('_blma_sampler', PACKAGE = 'blma', iterations, vy_in, mX_in, prior, modelprior, modelpriorvec_in, cores)
}