R/RcppExports.R

# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

#' Euclidean distance
#' @description
#' This function computes the Euclidean distance between two numeric vectors.
#' @param a
#' A numeric vector.
#' @param b
#' Another numeric vector of the same length as \code{a}.
#' @return
#' The Euclidean distance between \code{a} and \code{b}.
#' @export
#' @keywords
#' internal utils
#'
euc_dist <- function(a, b) {
    .Call(`_RprobitB_euc_dist`, a, b)
}
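
# A one-line base-R sketch of the same computation (euc_dist_r is a
# hypothetical reference version, shown only to make the formula explicit):
euc_dist_r <- function(a, b) sqrt(sum((a - b)^2))
stopifnot(all.equal(euc_dist_r(c(0, 0), c(3, 4)), 5))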

#' Weight-based update of latent classes
#' @description
#' This function updates the latent classes based on their class weights.
#' @param Cmax
#' The maximum number of classes.
#' @param epsmin
#' The threshold weight (between 0 and 1) for removing a class.
#' @param epsmax
#' The threshold weight (between 0 and 1) for splitting a class.
#' @param distmin
#' The (non-negative) threshold difference in class means for joining two
#' classes.
#' @inheritParams RprobitB_parameter
#' @details
#' The updating scheme is based on the following rules:
#' * We remove class \eqn{c}, if \eqn{s_c<\epsilon_{min}}, i.e. if the
#'   class weight \eqn{s_c} drops below some threshold
#'   \eqn{\epsilon_{min}}.
#'   This case indicates that class \eqn{c} has a negligible impact on the
#'   mixing distribution.
#' * We split class \eqn{c} into two classes \eqn{c_1} and \eqn{c_2}, if
#'   \eqn{s_c>\epsilon_{max}}.
#'   This case indicates that class \eqn{c} has a high influence on the mixing
#'   distribution whose approximation can potentially be improved by
#'   increasing the resolution in directions of high variance.
#'   Therefore, the class means \eqn{b_{c_1}} and \eqn{b_{c_2}} of the new
#'   classes \eqn{c_1} and \eqn{c_2} are shifted in opposite directions from
#'   the class mean \eqn{b_c} of the old class \eqn{c} in the direction of the
#'   highest variance.
#' * We join two classes \eqn{c_1} and \eqn{c_2} to one class \eqn{c}, if
#'   \eqn{||b_{c_1} - b_{c_2}||<\epsilon_{distmin}}, i.e. if
#'   the Euclidean distance between the class means \eqn{b_{c_1}} and
#'   \eqn{b_{c_2}} drops below some threshold \eqn{\epsilon_{distmin}}.
#'   This case indicates location redundancy which should be repealed. The
#'   parameters of \eqn{c} are assigned by adding the values of \eqn{s} from
#'   \eqn{c_1} and \eqn{c_2} and averaging the values for \eqn{b} and
#'   \eqn{\Omega}.
#' The rules are executed in the above order, but only one rule per iteration
#' and only if \code{Cmax} is not exceeded.
#' @examples
#' ### parameter settings
#' s <- c(0.8,0.2)
#' b <- matrix(c(1,1,1,-1), ncol=2)
#' Omega <- matrix(c(0.5,0.3,0.3,0.5,1,-0.1,-0.1,0.8), ncol=2)
#'
#' ### Remove class 2
#' RprobitB:::update_classes_wb(Cmax = 10, epsmin = 0.3, epsmax = 0.9, distmin = 1,
#'                              s = s, b = b, Omega = Omega)
#'
#' ### Split class 1
#' RprobitB:::update_classes_wb(Cmax = 10, epsmin = 0.1, epsmax = 0.7, distmin = 1,
#'                              s = s, b = b, Omega = Omega)
#'
#' ### Join classes
#' RprobitB:::update_classes_wb(Cmax = 10, epsmin = 0.1, epsmax = 0.9, distmin = 3,
#'                              s = s, b = b, Omega = Omega)
#' @return
#' A list of updated values for \code{s}, \code{b}, and \code{Omega}.
#' @keywords
#' internal
#'
update_classes_wb <- function(Cmax, epsmin, epsmax, distmin, s, b, Omega) {
    .Call(`_RprobitB_update_classes_wb`, Cmax, epsmin, epsmax, distmin, s, b, Omega)
}

#' Dirichlet process-based update of latent classes
#' @description
#' This function updates the latent classes based on a Dirichlet process.
#' @details
#' To be added.
#' @param Cmax
#' The maximum number of classes.
#' @inheritParams RprobitB_parameter
#' @inheritParams check_prior
#' @param s_desc
#' If \code{TRUE}, sort the classes in descending class weight.
#' @examples
#' set.seed(1)
#' z <- c(rep(1,20),rep(2,30))
#' b <- matrix(c(1,1,1,-1), ncol=2)
#' Omega <- matrix(c(1,0.3,0.3,0.5,1,-0.3,-0.3,0.8), ncol=2)
#' beta <- sapply(z, function(z) rmvnorm(b[,z], matrix(Omega[,z],2,2)))
#' delta <- 1
#' xi <- numeric(2)
#' D <- diag(2)
#' nu <- 4
#' Theta <- diag(2)
#' RprobitB:::update_classes_dp(
#'   Cmax = 10, beta = beta, z = z, b = b, Omega = Omega,
#'   delta = delta, xi = xi, D = D, nu = nu, Theta = Theta
#' )
#' @return
#' A list of updated values for \code{z}, \code{b}, \code{Omega}, \code{s},
#' and \code{C}.
#' @keywords
#' internal
#'
update_classes_dp <- function(Cmax, beta, z, b, Omega, delta, xi, D, nu, Theta, s_desc = TRUE) {
    .Call(`_RprobitB_update_classes_dp`, Cmax, beta, z, b, Omega, delta, xi, D, nu, Theta, s_desc)
}

#' Density of multivariate normal distribution
#' @description
#' This function computes the density of a multivariate normal distribution.
#' @param x
#' A quantile vector of length \code{n}.
#' @param mean
#' The mean vector of length \code{n}.
#' @param Sigma
#' The covariance matrix of dimension \code{n} x \code{n}.
#' @param log
#' A boolean, if \code{TRUE} the logarithm of the density value is returned.
#' @return
#' The density value.
#' @export
#' @examples
#' x = c(0,0)
#' mean = c(0,0)
#' Sigma = diag(2)
#' dmvnorm(x = x, mean = mean, Sigma = Sigma)
#' dmvnorm(x = x, mean = mean, Sigma = Sigma, log = TRUE)
#' @keywords
#' internal distribution
#'
dmvnorm <- function(x, mean, Sigma, log = FALSE) {
    .Call(`_RprobitB_dmvnorm`, x, mean, Sigma, log)
}
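
# A base-R sketch that makes the computed quantity explicit (dmvnorm_r
# is a hypothetical reference version of the exported routine):
dmvnorm_r <- function(x, mean, Sigma, log = FALSE) {
  n <- length(x)
  d <- x - mean
  ldet <- as.numeric(determinant(Sigma)$modulus)  # log determinant
  ld <- -0.5 * (n * log(2 * pi) + ldet + drop(t(d) %*% solve(Sigma) %*% d))
  if (log) ld else exp(ld)
}
dmvnorm_r(c(0, 0), c(0, 0), diag(2))  # equals 1 / (2 * pi)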

#' Draw from multivariate normal distribution
#' @description
#' This function draws from a multivariate normal distribution.
#' @details
#' The function builds upon the following fact: If \eqn{\epsilon = (\epsilon_1,\dots,\epsilon_n)},
#' where each \eqn{\epsilon_i} is drawn independently from a standard normal distribution,
#' then \eqn{\mu+L\epsilon} is a draw from the multivariate normal distribution
#' \eqn{N(\mu,\Sigma)}, where \eqn{L} is the lower triangular factor of the
#' Choleski decomposition of \eqn{\Sigma}.
#' @param mu
#' The mean vector of length \code{n}.
#' @param Sigma
#' The covariance matrix of dimension \code{n} x \code{n}.
#' @return
#' A numeric vector of length \code{n}.
#' @export
#' @examples
#' mu <- c(0,0)
#' Sigma <- diag(2)
#' rmvnorm(mu = mu, Sigma = Sigma)
#' @keywords
#' internal distribution
#'
rmvnorm <- function(mu, Sigma) {
    .Call(`_RprobitB_rmvnorm`, mu, Sigma)
}
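
# A base-R sketch of the documented construction mu + L epsilon: R's
# chol() returns the upper triangular factor, hence the transpose
# (rmvnorm_r is a hypothetical reference version):
rmvnorm_r <- function(mu, Sigma) {
  L <- t(chol(Sigma))                  # lower Choleski factor
  drop(mu + L %*% rnorm(length(mu)))   # mu + L * standard normal draws
}
set.seed(1)
rmvnorm_r(mu = c(0, 0), Sigma = diag(2))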

#' Draw from Dirichlet distribution
#' @description
#' Function to draw from a Dirichlet distribution.
#' @param delta
#' A vector, the concentration parameter.
#' @return
#' A vector, the sample from the Dirichlet distribution of the same length as \code{delta}.
#' @export
#' @examples
#' rdirichlet(delta = 1:3)
#' @keywords
#' internal distribution
#'
rdirichlet <- function(delta) {
    .Call(`_RprobitB_rdirichlet`, delta)
}
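
# A hedged base-R sketch using the standard gamma-normalization
# construction (rdirichlet_r is a hypothetical reference version; the
# C++ routine may differ internally):
rdirichlet_r <- function(delta) {
  g <- rgamma(length(delta), shape = delta, rate = 1)
  g / sum(g)
}
set.seed(1)
rdirichlet_r(1:3)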

#' Draw from Wishart distribution
#' @description
#' This function draws from a Wishart and inverted Wishart distribution.
#' @details
#' The Wishart distribution is a generalization of the gamma distribution
#' to multiple dimensions and draws from the space of covariance matrices.
#' Its expectation is \code{nu*V} and its variance increases both in \code{nu}
#' and in the values of \code{V}.
#' The Wishart distribution is the conjugate prior to the precision matrix of
#' a multivariate normal distribution and proper if \code{nu} is greater than
#' the number of dimensions.
#' @param nu
#' A numeric, the degrees of freedom. Must be at least the number of dimensions.
#' @param V
#' A matrix, the scale matrix.
#' @return
#' A list, the draws from the Wishart (\code{W}), inverted Wishart (\code{IW}), and
#' corresponding Choleski decomposition (\code{C} and \code{CI}).
#' @export
#' @examples
#' rwishart(nu = 2, V = diag(2))
#' @keywords
#' internal distribution
#'
rwishart <- function(nu, V) {
    .Call(`_RprobitB_rwishart`, nu, V)
}
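
# A hedged base-R sketch of a Wishart draw for integer degrees of
# freedom, built as the sum of nu outer products of N(0, V) vectors
# (rwishart_r is a hypothetical reference version; the exported routine
# additionally returns the Choleski factors):
rwishart_r <- function(nu, V) {
  Z <- matrix(rnorm(nu * nrow(V)), nrow = nu) %*% chol(V)
  W <- crossprod(Z)            # Wishart draw with E(W) = nu * V
  list(W = W, IW = solve(W))   # inverted Wishart draw
}
set.seed(1)
rwishart_r(nu = 5, V = diag(2))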

#' Update class weight vector
#' @description
#' This function updates the class weight vector by drawing from its posterior distribution.
#' @inheritParams check_prior
#' @param m
#' The vector of current class frequencies.
#' @return
#' A vector, a draw from the Dirichlet posterior distribution for \code{s}.
#' @details
#' Let \eqn{m=(m_1,\dots,m_C)} be the frequencies of \eqn{C} classes.
#' Given the class weight (probability) vector \eqn{s=(s_1,\dots,s_C)}, the distribution
#' of \eqn{m} is multinomial and its likelihood is \deqn{L(m\mid s) \propto \prod_{i=1}^C s_i^{m_i}.}
#' The conjugate prior \eqn{p(s)} for \eqn{s} is a Dirichlet distribution, which has a density function
#' proportional to \deqn{\prod_{i=1}^C s_i^{\delta_i-1},} where \eqn{\delta = (\delta_1,\dots,\delta_C)}
#' is the concentration parameter vector.
#' Note that in {RprobitB}, \eqn{\delta_1=\dots=\delta_C}. This restriction is necessary because the class number \eqn{C} can change.
#' The posterior distribution of \eqn{s} is proportional to \deqn{p(s) L(m\mid s) \propto \prod_{i=1}^C s_i^{\delta_i + m_i - 1},}
#' which in turn is proportional to a Dirichlet distribution with parameters \eqn{\delta+m}.
#' @examples
#' ### number of classes
#' C <- 4
#' ### current class sizes
#' m <- sample.int(C)
#' ### concentration parameter for Dirichlet prior (single-valued)
#' delta <- 1
#' ### updated class weight vector
#' update_s(delta = delta, m = m)
#' @export
#' @keywords
#' internal posterior
#'
update_s <- function(delta, m) {
    .Call(`_RprobitB_update_s`, delta, m)
}
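
# Given the conjugacy derived above, the update reduces to a single
# Dirichlet draw with concentration delta + m; a minimal sketch using
# the exported rdirichlet (m_cur and s_draw are illustrative names):
m_cur <- c(10, 2, 5)
s_draw <- rdirichlet(1 + m_cur)  # single-valued delta = 1, as in RprobitB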

#' Update class allocation vector
#' @description
#' This function updates the class allocation vector (independently for all observations)
#' by drawing from its conditional distribution.
#' @inheritParams RprobitB_parameter
#' @details
#' Let \eqn{z = (z_1,\dots,z_N)} denote the class allocation vector of the observations (mixed coefficients) \eqn{\beta = (\beta_1,\dots,\beta_N)}.
#' Independently for each \eqn{n}, the conditional probability \eqn{\Pr(z_n = c \mid s,\beta_n,b,\Omega)} of having \eqn{\beta_n}
#' allocated to class \eqn{c} for \eqn{c=1,\dots,C} depends on the class weight vector \eqn{s}, the class means \eqn{b=(b_c)_c}, and the class
#' covariance matrices \eqn{\Omega=(\Omega_c)_c}, and is proportional to \deqn{s_c \phi(\beta_n \mid b_c,\Omega_c).}
#' @return
#' An updated class allocation vector.
#' @examples
#' ### class weights for C = 2 classes
#' s <- rdirichlet(c(1,1))
#' ### coefficient vector for N = 1 decider and P_r = 2 random coefficients
#' beta <- matrix(c(1,1), ncol = 1)
#' ### class means and covariances
#' b <- cbind(c(0,0),c(1,1))
#' Omega <- cbind(c(1,0,0,1),c(1,0,0,1))
#' ### updated class allocation vector
#' update_z(s = s, beta = beta, b = b, Omega = Omega)
#' @export
#' @keywords
#' internal posterior
#'
update_z <- function(s, beta, b, Omega) {
    .Call(`_RprobitB_update_z`, s, beta, b, Omega)
}
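
# A hedged sketch of the documented allocation step for one decider:
# evaluate s_c * phi(beta_n | b_c, Omega_c) per class with the exported
# dmvnorm, normalize, and sample a class index (local names are
# illustrative):
s <- c(0.6, 0.4)
beta_n <- c(1, 1)
b <- cbind(c(0, 0), c(1, 1))
Omega <- cbind(c(1, 0, 0, 1), c(1, 0, 0, 1))
p <- sapply(1:2, function(cl) s[cl] * dmvnorm(beta_n, b[, cl], matrix(Omega[, cl], 2, 2)))
z_n <- sample(1:2, size = 1, prob = p / sum(p))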

#' Update class sizes
#' @description
#' This function updates the class size vector.
#' @inheritParams RprobitB_parameter
#' @param nozero
#' If \code{TRUE}, each element in the output vector \code{m} is at least one
#' (for numerical stability).
#' @return
#' An updated class size vector.
#' @examples
#' update_m(C = 3, z = c(1,1,1,2,2,3), nozero = FALSE)
#' @export
#' @keywords
#' internal posterior
#'
update_m <- function(C, z, nozero) {
    .Call(`_RprobitB_update_m`, C, z, nozero)
}
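
# A base-R sketch of the same counting step (tabulate does the
# counting; the pmax line mimics the documented nozero option):
m_sketch <- tabulate(c(1, 1, 1, 2, 2, 3), nbins = 3)
pmax(m_sketch, 1)  # nozero = TRUE variant (assumption: counts floored at one)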

#' Update class means
#' @description
#' This function updates the class means (independently of the other classes).
#' @inheritParams RprobitB_parameter
#' @param m
#' The vector of class sizes of length \code{C}.
#' @inheritParams check_prior
#' @param Dinv
#' The precision matrix (i.e. the inverse of the covariance matrix) of dimension \code{P_r} x \code{P_r}
#' of the normal prior for each \code{b_c}.
#' @details
#' The following holds independently for each class \eqn{c}.
#' Let \eqn{b_c} be the mean of class number \eqn{c}. A priori, we assume that \eqn{b_c} is normally distributed
#' with mean vector \eqn{\xi} and covariance matrix \eqn{D}.
#' Let \eqn{(\beta_n)_{z_n=c}} be the collection of \eqn{\beta_n} that are currently allocated to class \eqn{c},
#' \eqn{m_c} the class size, and \eqn{\bar{b}_c} their arithmetic mean.
#' Assuming independence across draws, \eqn{(\beta_n)_{z_n=c}} has
#' a normal likelihood of \deqn{\prod_n \phi(\beta_n \mid b_c,\Omega_c),} where the product is over the values \eqn{n}
#' for which \eqn{z_n=c} holds.
#' Due to the conjugacy of the prior, the posterior \eqn{\Pr(b_c \mid (\beta_n)_{z_n=c})} follows a normal distribution
#' with mean \deqn{(D^{-1} + m_c\Omega_c^{-1})^{-1}(D^{-1}\xi + m_c\Omega_c^{-1}\bar{b}_c)} and covariance matrix
#' \deqn{(D^{-1} + m_c \Omega_c^{-1})^{-1}.}
#' @return
#' A matrix of updated means for each class in columns.
#' @examples
#' ### N = 100 deciders, P_r = 2 random coefficients, and C = 2 latent classes
#' N <- 100
#' (b_true <- cbind(c(0,0),c(1,1)))
#' Omega <- matrix(c(1,0.3,0.3,0.5,1,-0.3,-0.3,0.8), ncol=2)
#' z <- c(rep(1,N/2),rep(2,N/2))
#' m <- as.numeric(table(z))
#' beta <- sapply(z, function(z) rmvnorm(b_true[,z], matrix(Omega[,z],2,2)))
#' ### prior mean vector and precision matrix (inverse of covariance matrix)
#' xi <- c(0,0)
#' Dinv <- diag(2)
#' ### updated class means (in columns)
#' update_b(beta = beta, Omega = Omega, z = z, m = m, xi = xi, Dinv = Dinv)
#' @export
#' @keywords
#' internal posterior
#'
update_b <- function(beta, Omega, z, m, xi, Dinv) {
    .Call(`_RprobitB_update_b`, beta, Omega, z, m, xi, Dinv)
}
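
# A hedged sketch assembling the documented posterior moments for a
# single class c (Oinv, m_c, bbar are illustrative local names):
Oinv <- solve(matrix(c(1, 0.3, 0.3, 0.5), 2, 2))  # Omega_c^{-1}
m_c <- 50                                         # class size
bbar <- c(0.1, -0.2)                              # mean of allocated beta_n
xi <- c(0, 0); Dinv <- diag(2)                    # prior mean and precision
Sig_post <- solve(Dinv + m_c * Oinv)
mu_post <- Sig_post %*% (Dinv %*% xi + m_c * (Oinv %*% bbar))
b_c_draw <- rmvnorm(drop(mu_post), Sig_post)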

#' Update class covariances
#' @description
#' This function updates the class covariances (independently of the other classes).
#' @inheritParams RprobitB_parameter
#' @param m
#' The vector of class sizes of length \code{C}.
#' @inheritParams check_prior
#' @details
#' The following holds independently for each class \eqn{c}.
#' Let \eqn{\Omega_c} be the covariance matrix of class number \eqn{c}.
#' A priori, we assume that \eqn{\Omega_c} is inverse Wishart distributed
#' with \eqn{\nu} degrees of freedom and scale matrix \eqn{\Theta}.
#' Let \eqn{(\beta_n)_{z_n=c}} be the collection of \eqn{\beta_n} that are currently allocated to class \eqn{c},
#' \eqn{m_c} the size of class \eqn{c}, and \eqn{b_c} the class mean vector.
#' Due to the conjugacy of the prior, the posterior \eqn{\Pr(\Omega_c \mid (\beta_n)_{z_n=c})} follows an inverted Wishart distribution
#' with \eqn{\nu + m_c} degrees of freedom and scale matrix \eqn{\Theta^{-1} + \sum_n (\beta_n - b_c)(\beta_n - b_c)'}, where
#' the sum is over the values \eqn{n} for which \eqn{z_n=c} holds.
#' @return
#' A matrix of updated covariance matrices for each class in columns.
#' @examples
#' ### N = 100 deciders, P_r = 2 random coefficients, and C = 2 latent classes
#' N <- 100
#' b <- cbind(c(0,0),c(1,1))
#' (Omega_true <- matrix(c(1,0.3,0.3,0.5,1,-0.3,-0.3,0.8), ncol=2))
#' z <- c(rep(1,N/2),rep(2,N/2))
#' m <- as.numeric(table(z))
#' beta <- sapply(z, function(z) rmvnorm(b[,z], matrix(Omega_true[,z],2,2)))
#' ### degrees of freedom and scale matrix for the Wishart prior
#' nu <- 1
#' Theta <- diag(2)
#' ### updated class covariance matrices (in columns)
#' update_Omega(beta = beta, b = b, z = z, m = m, nu = nu, Theta = Theta)
#' @export
#' @keywords
#' internal posterior
#'
update_Omega <- function(beta, b, z, m, nu, Theta) {
    .Call(`_RprobitB_update_Omega`, beta, b, z, m, nu, Theta)
}
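
# A hedged sketch of the documented posterior for one class, assuming
# that rwishart(nu, V)$IW yields an inverted Wishart draw with scale
# matrix solve(V) (this parameterization is an assumption, not verified
# against the C++ internals):
S_c <- matrix(c(20, 5, 5, 15), 2, 2)  # sum of (beta_n - b_c)(beta_n - b_c)'
m_c <- 50; nu <- 4; Theta <- diag(2)
Omega_c_draw <- rwishart(nu + m_c, solve(solve(Theta) + S_c))$IW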

#' Update coefficient vector of multiple linear regression
#' @description
#' This function updates the coefficient vector of a multiple linear regression.
#' @param mu0
#' The mean vector of the normal prior distribution for the coefficient vector.
#' @param Tau0
#' The precision matrix (i.e. inverted covariance matrix) of the normal prior distribution for the coefficient vector.
#' @param XSigX
#' The matrix \eqn{\sum_{n=1}^N X_n'\Sigma^{-1}X_n}. See below for details.
#' @param XSigU
#' The vector \eqn{\sum_{n=1}^N X_n'\Sigma^{-1}U_n}. See below for details.
#' @details
#' This function draws from the posterior distribution of \eqn{\beta} in the linear utility
#' equation \deqn{U_n = X_n\beta + \epsilon_n,} where \eqn{U_n} is the
#' (latent, but here assumed to be known) utility vector of decider \eqn{n = 1,\dots,N}, \eqn{X_n}
#' is the design matrix built from the choice characteristics faced by \eqn{n},
#' \eqn{\beta} is the unknown coefficient vector (this can be either the fixed
#' coefficient vector \eqn{\alpha} or the decider-specific coefficient vector \eqn{\beta_n}),
#' and \eqn{\epsilon_n} is the error term assumed to be normally distributed with mean \eqn{0}
#' and (known) covariance matrix \eqn{\Sigma}.
#' A priori we assume the (conjugate) normal prior distribution \deqn{\beta \sim N(\mu_0,T_0^{-1})}
#' with mean vector \eqn{\mu_0} and precision matrix (i.e. inverted covariance matrix) \eqn{T_0}.
#' The posterior distribution for \eqn{\beta} is normal with
#' covariance matrix \deqn{\Sigma_1 = (T_0 + \sum_{n=1}^N X_n'\Sigma^{-1}X_n)^{-1}} and mean vector
#' \deqn{\mu_1 = \Sigma_1(T_0\mu_0 + \sum_{n=1}^N X_n'\Sigma^{-1}U_n)}.
#' Note the analogy of \eqn{\mu_1} to the generalized least squares estimator
#' \deqn{\hat{\beta}_{GLS} = (\sum_{n=1}^N X_n'\Sigma^{-1}X_n)^{-1} \sum_{n=1}^N X_n'\Sigma^{-1}U_n} which
#' becomes weighted by the prior parameters \eqn{\mu_0} and \eqn{T_0}.
#' @return
#' A vector, a draw from the normal posterior distribution of the coefficient
#' vector in a multiple linear regression.
#' @examples
#' ### true coefficient vector
#' beta_true <- matrix(c(-1,1), ncol=1)
#' ### error term covariance matrix
#' Sigma <- matrix(c(1,0.5,0.2,0.5,1,0.2,0.2,0.2,2), ncol=3)
#' ### draw data
#' N <- 100
#' X <- replicate(N, matrix(rnorm(6), ncol=2), simplify = FALSE)
#' eps <- replicate(N, rmvnorm(mu = c(0,0,0), Sigma = Sigma), simplify = FALSE)
#' U <- mapply(function(X, eps) X %*% beta_true + eps, X, eps, SIMPLIFY = FALSE)
#' ### prior parameters for coefficient vector
#' mu0 <- c(0,0)
#' Tau0 <- diag(2)
#' ### draw from posterior of coefficient vector
#' XSigX <- Reduce(`+`, lapply(X, function(X) t(X) %*% solve(Sigma) %*% X))
#' XSigU <- Reduce(`+`, mapply(function(X, U) t(X) %*% solve(Sigma) %*% U, X, U, SIMPLIFY = FALSE))
#' beta_draws <- replicate(100, update_reg(mu0, Tau0, XSigX, XSigU), simplify = TRUE)
#' rowMeans(beta_draws)
#' @export
#' @importFrom stats sd
#' @keywords
#' internal posterior
#'
update_reg <- function(mu0, Tau0, XSigX, XSigU) {
    .Call(`_RprobitB_update_reg`, mu0, Tau0, XSigX, XSigU)
}
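
# A hedged sketch assembling the documented posterior moments directly;
# XSigX and XSigU below are small stand-ins for the sums defined above:
mu0 <- c(0, 0); Tau0 <- diag(2)
XSigX <- 100 * diag(2)   # stand-in for sum_n X_n' Sigma^-1 X_n
XSigU <- c(-90, 110)     # stand-in for sum_n X_n' Sigma^-1 U_n
Sig1 <- solve(Tau0 + XSigX)             # posterior covariance
mu1 <- Sig1 %*% (Tau0 %*% mu0 + XSigU)  # posterior mean
beta_draw <- rmvnorm(drop(mu1), Sig1)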

#' Update error term covariance matrix of multiple linear regression
#' @description
#' This function updates the error term covariance matrix of a multiple linear regression.
#' @param N
#' The number of observations \eqn{N}.
#' @param S
#' A matrix, the sum over the outer products of the residuals \eqn{(\epsilon_n)_{n=1,\dots,N}}.
#' @inheritParams check_prior
#' @details
#' This function draws from the posterior distribution of the covariance matrix \eqn{\Sigma} in the linear utility
#' equation \deqn{U_n = X_n\beta + \epsilon_n,} where \eqn{U_n} is the
#' (latent, but here assumed to be known) utility vector of decider \eqn{n = 1,\dots,N}, \eqn{X_n}
#' is the design matrix built from the choice characteristics faced by \eqn{n},
#' \eqn{\beta} is the coefficient vector, and \eqn{\epsilon_n} is the error term assumed to be
#' normally distributed with mean \eqn{0} and unknown covariance matrix \eqn{\Sigma}.
#' A priori we assume the (conjugate) Inverse Wishart distribution \deqn{\Sigma \sim IW(\kappa,E)}
#' with \eqn{\kappa} degrees of freedom and scale matrix \eqn{E}.
#' The posterior for \eqn{\Sigma} is the Inverse Wishart distribution with \eqn{\kappa + N} degrees of freedom
#' and scale matrix \eqn{E^{-1}+S}, where \eqn{S = \sum_{n=1}^{N} \epsilon_n \epsilon_n'} is the sum over
#' the outer products of the residuals \eqn{(\epsilon_n = U_n - X_n\beta)_n}.
#' @return
#' A matrix, a draw from the Inverse Wishart posterior distribution of the error term
#' covariance matrix in a multiple linear regression.
#' @examples
#' ### true error term covariance matrix
#' (Sigma_true <- matrix(c(1,0.5,0.2,0.5,1,0.2,0.2,0.2,2), ncol=3))
#' ### coefficient vector
#' beta <- matrix(c(-1,1), ncol=1)
#' ### draw data
#' N <- 100
#' X <- replicate(N, matrix(rnorm(6), ncol=2), simplify = FALSE)
#' eps <- replicate(N, rmvnorm(mu = c(0,0,0), Sigma = Sigma_true), simplify = FALSE)
#' U <- mapply(function(X, eps) X %*% beta + eps, X, eps, SIMPLIFY = FALSE)
#' ### prior parameters for covariance matrix
#' kappa <- 4
#' E <- diag(3)
#' ### draw from posterior of covariance matrix
#' outer_prod <- function(X, U) (U - X %*% beta) %*% t(U - X %*% beta)
#' S <- Reduce(`+`, mapply(outer_prod, X, U, SIMPLIFY = FALSE))
#' Sigma_draws <- replicate(100, update_Sigma(kappa, E, N, S))
#' apply(Sigma_draws, 1:2, mean)
#' apply(Sigma_draws, 1:2, stats::sd)
#' @export
#' @keywords
#' internal posterior
#'
update_Sigma <- function(kappa, E, N, S) {
    .Call(`_RprobitB_update_Sigma`, kappa, E, N, S)
}
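
# The same hedged convention as for update_Omega above: draw from the
# documented Inverse Wishart posterior via the exported rwishart,
# assuming rwishart(nu, V)$IW has scale matrix solve(V):
kappa <- 4; E <- diag(3); N_obs <- 100
S_res <- 120 * diag(3)  # stand-in for the residual outer-product sum
Sigma_draw <- rwishart(kappa + N_obs, solve(solve(E) + S_res))$IW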

#' Update latent utility vector
#' @description
#' This function updates the latent utility vector, where (independently across deciders and choice occasions)
#' the utility for each alternative is updated conditional on the other utilities.
#' @param U
#' The current utility vector of length \code{J-1}.
#' @param y
#' An integer from \code{1} to \code{J}, the index of the chosen alternative.
#' @param sys
#' A vector of length \code{J-1}, the systematic utility part.
#' @param Sigmainv
#' The inverted error term covariance matrix of dimension \code{J-1} x \code{J-1}.
#' @details
#' The key ingredient to Gibbs sampling for probit models is considering the latent utilities
#' as parameters themselves which can be updated (data augmentation).
#' Independently for all deciders \eqn{n=1,\dots,N} and choice occasions \eqn{t=1,...,T_n},
#' the utility vectors \eqn{(U_{nt})_{n,t}} in the linear utility equation \eqn{U_{nt} = X_{nt} \beta + \epsilon_{nt}}
#' follow a \eqn{J-1}-dimensional truncated normal distribution, where \eqn{J} is the number of alternatives,
#' \eqn{X_{nt} \beta} the systematic (i.e. non-random) part of the utility and \eqn{\epsilon_{nt} \sim N(0,\Sigma)} the error term.
#' The truncation points are determined by the choices \eqn{y_{nt}}. To draw from a truncated multivariate
#' normal distribution, this function implements the approach of Geweke (1998) to conditionally draw each component
#' separately from a univariate truncated normal distribution. See Oelschläger (2020) for the concrete formulas.
#' @references
#' See Geweke (1998) \emph{Efficient Simulation from the Multivariate Normal and Student-t Distributions Subject
#' to Linear Constraints and the Evaluation of Constraint Probabilities} for Gibbs sampling
#' from a truncated multivariate normal distribution. See Oelschläger and Bauer (2020) \emph{Bayes Estimation
#' of Latent Class Mixed Multinomial Probit Models} for its application to probit utilities.
#' @return
#' An updated utility vector of length \code{J-1}.
#' @examples
#' U <- c(0,0,0)
#' y <- 3
#' sys <- c(0,0,0)
#' Sigmainv <- solve(diag(3))
#' update_U(U, y, sys, Sigmainv)
#' @export
#' @keywords
#' internal posterior
#'
update_U <- function(U, y, sys, Sigmainv) {
    .Call(`_RprobitB_update_U`, U, y, sys, Sigmainv)
}
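
# A hedged sketch of a single conditional update in the Geweke scheme:
# update_U_component is a hypothetical helper that spells out the
# univariate conditional moments implied by the precision matrix
# Q = Sigmainv; the truncation point trunpt and direction above are
# kept abstract, since they depend on the choice y (see Oelschläger
# 2020 for the concrete rules).
update_U_component <- function(U, j, sys, Q, trunpt, above) {
  cond_sd <- sqrt(1 / Q[j, j])
  cond_mean <- sys[j] - (1 / Q[j, j]) * drop(Q[j, -j] %*% (U[-j] - sys[-j]))
  U[j] <- rtnorm(cond_mean, cond_sd, trunpt, above)
  U
}
# e.g. update_U_component(U = c(0,0,0), j = 1, sys = c(0,0,0), Q = diag(3),
#                         trunpt = 0, above = TRUE)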

#' Update latent utility vector in the ranked probit case
#' @description
#' This function updates the latent utility vector in the ranked probit case.
#' @param U
#' The current utility vector of length \code{J-1}, differenced such that
#' each element is negative.
#' @param sys
#' A vector of length \code{J-1}, the systematic utility part.
#' @param Sigmainv
#' The inverted error term covariance matrix of dimension
#' \code{J-1} x \code{J-1}.
#' @details
#' This update is basically the same as in the non-ranked case, except that
#' the truncation point is always zero.
#' @return
#' An updated utility vector of length \code{J-1}.
#' @examples
#' U <- c(0,0)
#' sys <- c(0,0)
#' Sigmainv <- diag(2)
#' update_U_ranked(U, sys, Sigmainv)
#' @export
#' @keywords
#' internal posterior
#'
update_U_ranked <- function(U, sys, Sigmainv) {
    .Call(`_RprobitB_update_U_ranked`, U, sys, Sigmainv)
}

#' Transform threshold increments to thresholds
#' @description
#' This helper function transforms the threshold increments \code{d} to the
#' thresholds \code{gamma}.
#' @param d
#' A numeric vector of threshold increments.
#' @details
#' The threshold vector \code{gamma} is computed from the threshold increments
#' \code{d} as \code{c(-100,0,cumsum(exp(d)),100)}, where the bounds
#' \code{-100} and \code{100} exist for numerical reasons and the first
#' threshold is fixed to \code{0} for identification.
#' @return
#' A numeric vector of the thresholds.
#' @examples
#' d_to_gamma(c(0,0,0))
#' @export
#' @keywords
#' internal posterior
#'
d_to_gamma <- function(d) {
    .Call(`_RprobitB_d_to_gamma`, d)
}
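
# The documented transformation in one line of base R, shown for
# reference (mirrors the formula above):
d <- c(0, 0, 0)
c(-100, 0, cumsum(exp(d)), 100)  # equals d_to_gamma(d)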

#' Log-likelihood in the ordered probit model
#' @description
#' This function computes the log-likelihood value given the threshold
#' increments \code{d}.
#' @param d
#' A numeric vector of threshold increments.
#' @param y
#' A matrix of the choices.
#' @param mu
#' A matrix of the systematic utilities.
#' @param Tvec
#' The element \code{Tvec} in \code{\link{sufficient_statistics}}.
#' @return
#' The log-likelihood value.
#' @examples
#' ll_ordered(c(0,0,0), matrix(1), matrix(1), 1)
#' @export
#' @keywords
#' internal posterior
#'
ll_ordered <- function(d, y, mu, Tvec) {
    .Call(`_RprobitB_ll_ordered`, d, y, mu, Tvec)
}
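
# A hedged base-R sketch of the per-observation likelihood term: the
# choice probability is the normal mass between the two thresholds
# enclosing the chosen category (the exact indexing is an assumption,
# derived from d_to_gamma above; ll_ordered_term is hypothetical):
ll_ordered_term <- function(d, y, mu) {
  gamma <- c(-100, 0, cumsum(exp(d)), 100)
  log(pnorm(gamma[y + 1] - mu) - pnorm(gamma[y] - mu))
}
ll_ordered_term(d = c(0, 0, 0), y = 1, mu = 1)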

#' Update utility threshold increments
#' @description
#' This function updates the utility threshold increments.
#' @param d
#' The current vector of utility threshold increments.
#' @param ll
#' The current log-likelihood value.
#' @param zeta
#' The mean vector of the normal prior for \code{d}.
#' @param Z
#' The covariance matrix of the normal prior for \code{d}.
#' @inheritParams ll_ordered
#' @return
#' The updated value for \code{d}.
#' @export
#' @keywords
#' internal posterior
#'
update_d <- function(d, y, mu, ll, zeta, Z, Tvec) {
    .Call(`_RprobitB_update_d`, d, y, mu, ll, zeta, Z, Tvec)
}

#' Markov chain Monte Carlo simulation for the probit model
#'
#' @description
#' This function draws from the posterior distribution of the probit model via
#' Markov chain Monte Carlo simulation.
#'
#' @details
#' This function is not supposed to be called directly, but rather via
#' \code{\link{fit_model}}.
#'
#' @param sufficient_statistics
#' The output of \code{\link{sufficient_statistics}}.
#' @inheritParams fit_model
#' @inheritParams RprobitB_data
#' @param init
#' The output of \code{\link{set_initial_gibbs_values}}.
#' @return
#' A list of Gibbs samples for
#' \itemize{
#'   \item \code{Sigma},
#'   \item \code{alpha} (if \code{P_f>0}),
#'   \item \code{s}, \code{z}, \code{b}, \code{Omega} (if \code{P_r>0}),
#'   \item \code{d} (if \code{ordered = TRUE}),
#' }
#' and a vector \code{class_sequence} of length \code{R}, where the \code{r}th
#' entry is the number of latent classes after iteration \code{r}.
#'
#' @keywords
#' internal
#'
gibbs_sampling <- function(sufficient_statistics, prior, latent_classes, fixed_parameter, init, R, B, print_progress, ordered, ranked) {
    .Call(`_RprobitB_gibbs_sampling`, sufficient_statistics, prior, latent_classes, fixed_parameter, init, R, B, print_progress, ordered, ranked)
}

#' Draw from one-sided truncated normal
#' @description
#' This function draws from a one-sided truncated univariate normal
#' distribution.
#' @param mu
#' The mean.
#' @param sig
#' The standard deviation.
#' @param trunpt
#' The truncation point.
#' @param above
#' A boolean, if \code{TRUE} truncate from above, otherwise from below.
#' @return
#' A numeric value.
#' @export
#' @examples
#' ### samples from a standard normal truncated at 1 from above
#' R <- 1e4
#' draws <- replicate(R, rtnorm(0,1,1,TRUE))
#' plot(density(draws))
#' @keywords
#' internal distribution
#'
rtnorm <- function(mu, sig, trunpt, above) {
    .Call(`_RprobitB_rtnorm`, mu, sig, trunpt, above)
}
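
# A hedged base-R sketch via the inverse-CDF method, one standard way
# to draw a one-sided truncated normal (rtnorm_r is a hypothetical
# reference version; the C++ routine may use a different method):
rtnorm_r <- function(mu, sig, trunpt, above) {
  p <- pnorm((trunpt - mu) / sig)
  u <- if (above) runif(1, 0, p) else runif(1, p, 1)
  mu + sig * qnorm(u)
}
set.seed(1)
rtnorm_r(0, 1, 1, TRUE)  # standard normal truncated from above at 1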

#' Draw from two-sided truncated normal
#' @description
#' This function draws from a two-sided truncated univariate normal
#' distribution.
#' @param mu
#' The mean.
#' @param sig
#' The standard deviation.
#' @param lower_bound
#' The lower truncation point.
#' @param upper_bound
#' The upper truncation point.
#' @return
#' A numeric value.
#' @export
#' @examples
#' ### samples from a standard normal truncated at -2 and 2
#' R <- 1e4
#' draws <- replicate(R, rttnorm(0,1,-2,2))
#' plot(density(draws))
#' @keywords
#' internal distribution
#'
rttnorm <- function(mu, sig, lower_bound, upper_bound) {
    .Call(`_RprobitB_rttnorm`, mu, sig, lower_bound, upper_bound)
}
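
# The analogous inverse-CDF sketch for the two-sided case (again a
# hypothetical reference version, not necessarily the internal method):
rttnorm_r <- function(mu, sig, lower_bound, upper_bound) {
  p <- pnorm((c(lower_bound, upper_bound) - mu) / sig)
  mu + sig * qnorm(runif(1, p[1], p[2]))
}
set.seed(1)
rttnorm_r(0, 1, -2, 2)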
