eval_pedigree_ll    R Documentation

Description
Approximate the log marginal likelihood and the derivatives with respect to the model parameters.
Usage

eval_pedigree_ll(
  ptr, par, maxvls, abs_eps, rel_eps, indices = NULL, minvls = -1L,
  do_reorder = TRUE, use_aprx = FALSE, n_threads = 1L,
  cluster_weights = NULL, standardized = FALSE, method = 0L,
  use_tilting = FALSE, vls_scales = NULL
)

eval_pedigree_grad(
  ptr, par, maxvls, abs_eps, rel_eps, indices = NULL, minvls = -1L,
  do_reorder = TRUE, use_aprx = FALSE, n_threads = 1L,
  cluster_weights = NULL, standardized = FALSE, method = 0L,
  use_tilting = FALSE, vls_scales = NULL
)

eval_pedigree_hess(
  ptr, par, maxvls, abs_eps, rel_eps, indices = NULL, minvls = -1L,
  do_reorder = TRUE, use_aprx = FALSE, n_threads = 1L,
  cluster_weights = NULL, standardized = FALSE, method = 0L,
  use_tilting = FALSE, vls_scales = NULL
)
Arguments

ptr
    object from pedigree_ll_terms or pedigree_ll_terms_loadings.

par
    numeric vector with parameters. For an object from pedigree_ll_terms,
    these are the fixed effect coefficients followed by the log scale
    parameters; for an object from pedigree_ll_terms_loadings, the fixed
    effect coefficients followed by the coefficients for the scale
    parameters.

maxvls
    maximum number of samples in the approximation for each marginal
    likelihood term.

abs_eps
    absolute convergence threshold.

rel_eps
    relative convergence threshold.

indices
    zero-based vector with indices of which log marginal likelihood terms
    to include. Use NULL if all terms should be included. See the short
    sketch after this list.

minvls
    minimum number of samples for each marginal likelihood term. Negative
    values provide a default which depends on the dimension of the
    integration.

do_reorder
    TRUE if a heuristic variable reordering should be used.

use_aprx
    TRUE if a less precise but faster approximation of pnorm and qnorm
    should be used.

n_threads
    number of threads to use.

cluster_weights
    numeric vector with weights for each cluster. Use NULL if all clusters
    should have weight one.

standardized
    logical for whether to use the standardized or direct
    parameterization. See standardized_to_direct.

use_tilting
    TRUE if tilting should be used when approximating the marginal
    likelihood terms.

method
    integer with the method to use. Zero yields randomized Korobov lattice
    rules while one yields scrambled Sobol sequences.

vls_scales
    can be a numeric vector with a positive scalar for each cluster, in
    which case the number of samples used for cluster i is scaled by
    vls_scales[i]. Use NULL if the same minvls and maxvls should be used
    for all clusters. A short sketch is given at the end of the Examples.
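For example, indices can restrict the evaluation to a subset of the log
marginal likelihood terms. A minimal sketch, assuming ptr, beta, and scs are
set up as in the Examples below:

# evaluate only the first two families (the indices are zero-based)
eval_pedigree_ll(
  ptr = ptr, par = c(beta, log(scs)), abs_eps = -1, maxvls = 1e4,
  rel_eps = 1e-3, minvls = 2000, use_aprx = TRUE, indices = 0:1)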
Details

eval_pedigree_hess is only implemented for objects from pedigree_ll_terms.
Value

eval_pedigree_ll: a scalar with the log marginal likelihood approximation.
It has an attribute called "n_fails" which shows the number of log marginal
likelihood term approximations that do not satisfy the abs_eps and rel_eps
criteria, and an attribute called "std" with a standard error estimate based
on the delta rule.
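These attributes can be inspected directly; a small sketch using ll1 from
the Examples below:

attr(ll1, "n_fails") # number of terms that did not meet the error criteria
attr(ll1, "std")     # standard error estimate of the approximation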
eval_pedigree_grad: a vector with the derivatives with respect to par. An
attribute called "logLik" contains the log marginal likelihood
approximation. There is also an "n_fails" attribute as for eval_pedigree_ll,
and an attribute called "std" whose first element is the standard error
estimate of the log likelihood based on the delta method and whose remaining
elements are the standard error estimates of the gradient. The latter ignore
the Monte Carlo error from the likelihood approximation.
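For instance, the pieces of the gradient output can be pulled out as follows
(a sketch using deriv_w_weight from the Examples below):

c(deriv_w_weight)               # the gradient itself
attr(deriv_w_weight, "logLik")  # log marginal likelihood approximation
attr(deriv_w_weight, "std")[1]  # standard error of the log likelihood
attr(deriv_w_weight, "std")[-1] # standard errors of the gradient elements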
eval_pedigree_hess: a matrix with the Hessian with respect to par. An
attribute called "logLik" contains the log marginal likelihood approximation
and an attribute called "grad" contains the gradient. The attribute
"hess_org" contains the Hessian with the scale parameters on the identity
scale rather than the log scale. The attributes "vcov" and "vcov_org" are
the covariance matrices obtained from the Hessian and from "hess_org",
respectively.
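A sketch of how the covariance matrix attributes can be turned into standard
error estimates, using hess_dum from the Examples below (this is typically
only meaningful when the parameters are at the maximum likelihood estimate):

sqrt(diag(attr(hess_dum, "vcov")))     # standard errors, log scale parameters
sqrt(diag(attr(hess_dum, "vcov_org"))) # scale parameters on the identity scale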
Examples

# three families as an example
fam_dat <- list(
  list(
    y = c(FALSE, TRUE, FALSE, FALSE),
    X = structure(c(
      1, 1, 1, 1, 1.2922654151273, 0.358134905909256, -0.734963997107464,
      0.855235473516044, -1.16189500386223, -0.387298334620742,
      0.387298334620742, 1.16189500386223),
      .Dim = 4:3, .Dimnames = list(NULL, c("(Intercept)", "X1", ""))),
    rel_mat = structure(c(
      1, 0.5, 0.5, 0.125, 0.5, 1, 0.5, 0.125, 0.5, 0.5, 1, 0.125,
      0.125, 0.125, 0.125, 1), .Dim = c(4L, 4L)),
    met_mat = structure(c(
      1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1), .Dim = c(4L, 4L))),
  list(
    y = c(FALSE, FALSE, FALSE),
    X = structure(c(
      1, 1, 1, -0.0388728997202442, -0.0913782435233639,
      -0.0801619722392612, -1, 0, 1), .Dim = c(3L, 3L)),
    rel_mat = structure(c(
      1, 0.5, 0.125, 0.5, 1, 0.125, 0.125, 0.125, 1), .Dim = c(3L, 3L)),
    met_mat = structure(c(
      1, 1, 0, 1, 1, 0, 0, 0, 1), .Dim = c(3L, 3L))),
  list(
    y = c(TRUE, FALSE),
    X = structure(c(
      1, 1, 0.305275750370738, -1.49482995913648,
      -0.707106781186547, 0.707106781186547),
      .Dim = 2:3, .Dimnames = list(NULL, c("(Intercept)", "X1", ""))),
    rel_mat = structure(c(1, 0.5, 0.5, 1), .Dim = c(2L, 2L)),
    met_mat = structure(c(1, 1, 1, 1), .Dim = c(2L, 2L))))

# get the data into the format needed for the package
dat_arg <- lapply(fam_dat, function(x){
  # we need the following for each family:
  #   y: the zero-one outcomes.
  #   X: the design matrix for the fixed effects.
  #   scale_mats: list with the scale matrices for each type of effect.
  list(y = as.numeric(x$y), X = x$X,
       scale_mats = list(x$rel_mat, x$met_mat))
})

# get a pointer to the C++ object
ptr <- pedigree_ll_terms(dat_arg, max_threads = 1L)

# approximate the log marginal likelihood
beta <- c(-1, 0.3, 0.2) # fixed effect coefficients
scs <- c(0.5, 0.33)     # scales parameters

set.seed(44492929)
system.time(ll1 <- eval_pedigree_ll(
  ptr = ptr, par = c(beta, log(scs)), abs_eps = -1, maxvls = 1e5,
  rel_eps = 1e-5, minvls = 2000, use_aprx = FALSE))
ll1 # the approximation

# with the approximation of pnorm and qnorm
system.time(ll2 <- eval_pedigree_ll(
  ptr = ptr, par = c(beta, log(scs)), abs_eps = -1, maxvls = 1e5,
  rel_eps = 1e-5, minvls = 2000, use_aprx = TRUE))
all.equal(ll1, ll2, tolerance = 1e-5)

# cluster weights can be used as follows to repeat the second family three
# times and remove the third
system.time(deriv_w_weight <- eval_pedigree_grad(
  ptr = ptr, par = c(beta, log(scs)), abs_eps = -1, maxvls = 1e6,
  rel_eps = 1e-3, minvls = 2000, use_aprx = TRUE,
  cluster_weights = c(1, 3, 0)))

# the same as manually repeating second cluster and not including the third
dum_dat <- dat_arg[c(1, 2, 2, 2)]
dum_ptr <- pedigree_ll_terms(dum_dat, 1L)
system.time(deriv_dum <- eval_pedigree_grad(
  ptr = dum_ptr, par = c(beta, log(scs)), abs_eps = -1, maxvls = 1e6,
  rel_eps = 1e-3, minvls = 2000, use_aprx = TRUE))
all.equal(deriv_dum, deriv_w_weight, tolerance = 1e-3)

# the hessian is computed on the scale parameter scale rather than on the
# log of the scale parameters
system.time(hess_w_weight <- eval_pedigree_hess(
  ptr = ptr, par = c(beta, log(scs)), abs_eps = -1, maxvls = 1e6,
  rel_eps = 1e-3, minvls = 2000, use_aprx = TRUE,
  cluster_weights = c(1, 3, 0)))

system.time(hess_dum <- eval_pedigree_hess(
  ptr = dum_ptr, par = c(beta, log(scs)), abs_eps = -1, maxvls = 1e6,
  rel_eps = 1e-3, minvls = 2000, use_aprx = TRUE))

attr(hess_w_weight, "n_fails") <- attr(hess_dum, "n_fails") <- NULL
all.equal(hess_w_weight, hess_dum, tolerance = 1e-3)

# the results are consistent with the gradient output
all.equal(attr(deriv_dum, "logLik"), attr(hess_dum, "logLik"),
          tolerance = 1e-5)

hess_grad <- attr(hess_dum, "grad")
all.equal(hess_grad, deriv_dum, check.attributes = FALSE, tolerance = 1e-3)

# with loadings
dat_arg_loadings <- lapply(fam_dat, function(x){
  list(y = as.numeric(x$y), X = x$X, Z = x$X[, 1:2],
       scale_mats = list(x$rel_mat, x$met_mat))
})

ptr_loadings <-
  pedigree_ll_terms_loadings(dat_arg_loadings, max_threads = 1L)

scs <- c(log(0.5) / 2, 0.1, log(0.33) / 2, 0.2) # got more scales parameters
eval_pedigree_ll(
  ptr = ptr_loadings, par = c(beta, scs), abs_eps = -1, maxvls = 1e4,
  rel_eps = 1e-3, minvls = 2000, use_aprx = TRUE)
eval_pedigree_grad(
  ptr = ptr_loadings, par = c(beta, scs), abs_eps = -1, maxvls = 1e4,
  rel_eps = 1e-3, minvls = 2000, use_aprx = TRUE)

# can recover the result from before
scs <- c(log(0.5) / 2, 0, log(0.33) / 2, 0)
ll3 <- eval_pedigree_ll(
  ptr = ptr_loadings, par = c(beta, scs), abs_eps = -1, maxvls = 1e4,
  rel_eps = 1e-3, minvls = 2000, use_aprx = TRUE)
all.equal(ll1, ll3, tolerance = 1e-5)
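The vls_scales argument can similarly be used to spend more samples on some
clusters than on others. A minimal sketch using the first pointer from
above, with the chosen scaling vector purely illustrative:

# use a larger sample budget for the second family than for the others
eval_pedigree_ll(
  ptr = ptr, par = c(beta, log(c(0.5, 0.33))), abs_eps = -1, maxvls = 1e5,
  rel_eps = 1e-5, minvls = 2000, use_aprx = TRUE,
  vls_scales = c(1, 2, 1))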