## usethis namespace: start
#'
#' @import torch
#'
#' @importFrom gsl hyperg_U
#'
#' @importFrom progress progress_bar
#'
#' @importFrom stats model.response model.matrix model.frame rnorm na.pass delete.response .getXlevels pt rgamma median rbeta
#'
#' @importFrom methods formalArgs
#'
#' @importFrom utils packageVersion getFromNamespace zip unzip
#'
#' @importFrom graphics boxplot
#'
#' @importFrom mniw rMT
#'
## usethis namespace: end
.onAttach <- function(libname, pkgname) {
  if (!torch::torch_is_installed()) {
    packageStartupMessage("Welcome to shrinkGPR version ", packageVersion("shrinkGPR"),
                          ".\n \nNOTE: No torch installation detected. This package requires torch to function.",
                          "\nPlease install torch by running torch::install_torch().")
  } else {
    if (cuda_is_available()) {
      CUDA_message <- paste0("CUDA installation detected and functioning with torch.\n",
                             "CUDA will be used for GPU acceleration by default.")
    } else {
      CUDA_message <- paste0("NOTE: No CUDA installation detected. Estimation may be quite slow for larger datasets.\n",
                             "Consider installing CUDA for GPU acceleration. Information on this can be found at:\n",
                             "https://cran.r-project.org/web/packages/torch/vignettes/installation.html")
    }
    start_message <- paste0("\nWelcome to shrinkGPR version ", packageVersion("shrinkGPR"),
                            ".\n \n", CUDA_message)
    packageStartupMessage(start_message)
  }
}
# Create holding environment for JIT compiled code
.shrinkGPR_internal <- new.env(parent = emptyenv())
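# The empty parent environment keeps lookups from falling through to the
# search path; the TorchScript functions below are compiled once at load
# time and cached here rather than recompiled on every call.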
.onLoad <- function(libname, pkgname) {
  if (torch::torch_is_installed()) {
    .shrinkGPR_internal$jit_funcs <- jit_compile("
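# sylvester_full: one Sylvester normalizing flow layer (van den Berg et al., 2018).
# Transforms z to zk = z + (Q r1 tanh(r2 Q^T z^T + b))^T, with Q an orthogonal
# matrix built from n_householder Householder reflections and r1, r2 triangular
# matrices whose diagonals are diag1 and diag2. Returns the transformed draws
# together with log|det(Jacobian)| for each row of z.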
def sylvester_full(
    z: torch.Tensor,
    Q_param: torch.Tensor,
    r1: torch.Tensor,
    r2: torch.Tensor,
    b: torch.Tensor,
    diag1: torch.Tensor,
    diag2: torch.Tensor,
    n_householder: List[int]
) -> Tuple[torch.Tensor, torch.Tensor]:
    Q_t = Q_param.transpose(0, 1)
    norms = torch.norm(Q_t, p=2, dim=0, keepdim=True).clamp_min(1e-8)
    V = Q_t / norms
    d = V.size(0)
    Q = torch.eye(d, device=z.device)
    for i in range(n_householder[0]):
        v = V[:, i].unsqueeze(1)
        H = torch.eye(d, device=z.device) - 2 * torch.mm(v, v.t())
        Q = torch.mm(Q, H)
    rqzb = torch.matmul(r2, torch.matmul(Q.t(), z.t())) + b.unsqueeze(1)
    h_rqzb = torch.tanh(rqzb)
    zk = z + torch.matmul(Q, torch.matmul(r1, h_rqzb)).t()
    diag_j = diag1 * diag2
    h_deriv = 1.0 - h_rqzb.pow(2)
    diag_j = h_deriv.t() * diag_j
    diag_j = diag_j + 1.0
    log_diag_j = torch.log(torch.abs(diag_j)).sum(dim=1)
    return zk, log_diag_j
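
# sqdist: batched squared distances between rows of x (or of x_star and x),
# after each input dimension is scaled by sqrt(thetas), i.e. thetas act as
# inverse squared lengthscales; one distance matrix per row of thetas.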
def sqdist(
    x: torch.Tensor,
    thetas: torch.Tensor,
    x_star: Optional[torch.Tensor]
) -> torch.Tensor:
    x_scaled = x.unsqueeze(0) * torch.sqrt(thetas.unsqueeze(1))
    if x_star is None:
        return torch.cdist(x_scaled, x_scaled, p=2.0).pow(2)
    else:
        x_star_scaled = x_star.unsqueeze(0) * torch.sqrt(thetas.unsqueeze(1))
        return torch.cdist(x_star_scaled, x_scaled, p=2.0).pow(2)
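
# ldnorm: Gaussian marginal log-likelihood of y under covariance K + sigma2 * I,
# up to the additive constant -0.5 * N * log(2 * pi). A precomputed Cholesky
# factor can be passed via L; if beta and x_mean are supplied, y is demeaned
# by the linear mean function x_mean beta^T first.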
def ldnorm(
    K: torch.Tensor,
    L: Optional[torch.Tensor],
    sigma2: torch.Tensor,
    y: torch.Tensor,
    x_mean: Optional[torch.Tensor],
    beta: Optional[torch.Tensor],
) -> torch.Tensor:
    B, N, _ = K.size()
    I = torch.eye(N, device=K.device).unsqueeze(0).expand(B, N, N)
    sigma_term = I * sigma2.view(B, 1, 1)
    K_eps = K + sigma_term
    # Decide which Cholesky factor to use
    if L is None:
        L_local = torch.cholesky(K_eps)
    else:
        # Tell TorchScript that inside this block L is a Tensor, not Optional[Tensor]
        assert L is not None
        L_local = L
    slogdet = 2.0 * torch.sum(torch.log(torch.diagonal(L_local, dim1=-2, dim2=-1)), dim=1)
    if (beta is not None and x_mean is not None):
        y_demean = (y - torch.matmul(x_mean, beta.transpose(0, 1))).transpose(0, 1).unsqueeze(-1)
        y_batch = y_demean
    else:
        y_batch = y.unsqueeze(0).expand(B, N, 1)
    alpha = torch.cholesky_solve(y_batch, L_local)
    quad = torch.matmul(y_batch.transpose(1, 2), alpha).squeeze(-1).squeeze(-1)
    log_lik = -0.5 * slogdet - 0.5 * quad
    return log_lik
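
# ldt: multivariate Student-t marginal log-likelihood with nu degrees of
# freedom, scaled by (nu - 2) so that K + sigma2 * I is the covariance rather
# than the scale matrix; the additive constant -0.5 * N * log(pi) is omitted.
# Cholesky and mean-function handling are as in ldnorm.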
def ldt(
    K: torch.Tensor,
    L: Optional[torch.Tensor],
    sigma2: torch.Tensor,
    y: torch.Tensor,
    x_mean: Optional[torch.Tensor],
    beta: Optional[torch.Tensor],
    nu: torch.Tensor
) -> torch.Tensor:
    B, N, _ = K.size()
    I = torch.eye(N, device=K.device).unsqueeze(0).expand(B, N, N)
    sigma_term = I * sigma2.view(B, 1, 1)
    K_eps = K + sigma_term
    # Decide which Cholesky factor to use
    if L is None:
        L_local = torch.cholesky(K_eps)
    else:
        # Tell TorchScript that inside this block L is a Tensor, not Optional[Tensor]
        assert L is not None
        L_local = L
    slogdet = 2.0 * torch.sum(torch.log(torch.diagonal(L_local, dim1=-2, dim2=-1)), dim=1)
    if (beta is not None and x_mean is not None):
        y_demean = (y - torch.matmul(x_mean, beta.transpose(0, 1))).transpose(0, 1).unsqueeze(-1)
        y_batch = y_demean
    else:
        y_batch = y.unsqueeze(0).expand(B, N, 1)
    alpha = torch.cholesky_solve(y_batch, L_local)
    quad = torch.matmul(y_batch.transpose(1, 2), alpha).squeeze(-1).squeeze(-1)
    log_lik = (torch.lgamma((nu + N) * 0.5) - 0.5 * N * torch.log(nu - 2)
               - torch.lgamma(0.5 * nu) - 0.5 * slogdet
               - 0.5 * (nu + N) * torch.log(1 + 1 / (nu - 2) * quad))
    return log_lik
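
# make_tril: scatters the first size * (size - 1) / 2 entries of each row of x
# into the strictly lower triangle of a size x size matrix (zeros elsewhere),
# one matrix per row of x.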
def make_tril(
    x: torch.Tensor,
    size: List[int]
) -> torch.Tensor:
    size_int = int(size[0])
    tril_indices = torch.tril_indices(size_int, size_int, device=x.device, offset=-1)
    A = torch.zeros(x.shape[0], size_int, size_int, device=x.device)
    A[:, tril_indices[0], tril_indices[1]] = x[:, 0:(size_int * (size_int - 1) // 2)]
    return A
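
# Covariance kernels on the scaled distances from sqdist, each with amplitude
# 1 / tau: squared exponential and Matern with smoothness 1/2, 3/2 and 5/2.
# The 1e-4 jitter inside the square roots keeps the gradient finite at
# zero distance.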
def kernel_se(thetas: torch.Tensor, tau: torch.Tensor, x: torch.Tensor, x_star: Optional[torch.Tensor]) -> torch.Tensor:
    D = sqdist(x, thetas, x_star)
    return (1.0 / tau.unsqueeze(1).unsqueeze(2)) * torch.exp(-0.5 * D)

def kernel_matern_12(thetas: torch.Tensor, tau: torch.Tensor, x: torch.Tensor, x_star: Optional[torch.Tensor]) -> torch.Tensor:
    D = torch.sqrt(sqdist(x, thetas, x_star) + 1e-4)
    return (1.0 / tau.unsqueeze(1).unsqueeze(2)) * torch.exp(-D)

def kernel_matern_32(thetas: torch.Tensor, tau: torch.Tensor, x: torch.Tensor, x_star: Optional[torch.Tensor]) -> torch.Tensor:
    D = torch.sqrt(sqdist(x, thetas, x_star) + 1e-4)
    sqrt3 = 3.0 ** 0.5
    return (1.0 / tau.unsqueeze(1).unsqueeze(2)) * (1 + sqrt3 * D) * torch.exp(-sqrt3 * D)

def kernel_matern_52(thetas: torch.Tensor, tau: torch.Tensor, x: torch.Tensor, x_star: Optional[torch.Tensor]) -> torch.Tensor:
    D = torch.sqrt(sqdist(x, thetas, x_star) + 1e-4)
    sqrt5 = 5.0 ** 0.5
    return (1.0 / tau.unsqueeze(1).unsqueeze(2)) * (1 + sqrt5 * D + (5.0 / 3.0) * D ** 2) * torch.exp(-sqrt5 * D)
# New functions for multivariate outputs
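# ldnorm_multi: matrix-normal log-likelihood of the N x M matrix y with row
# covariance K (Cholesky factor L_K) and column covariance Omega (L_Om):
# -0.5 * M * log|K| - 0.5 * N * log|Omega| - 0.5 * tr(Omega^-1 y^T K^-1 y),
# up to the additive constant -0.5 * N * M * log(2 * pi).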
def ldnorm_multi(
    L_K: torch.Tensor,
    L_Om: torch.Tensor,
    y: torch.Tensor,
    M: List[int],
    N: List[int]
) -> torch.Tensor:
    n_latent = L_K.size(0)
    M_int = M[0]
    N_int = N[0]
    alpha = torch.cholesky_solve(y.unsqueeze(0).expand(n_latent, N_int, M_int), L_K, upper=False)
    Yt = y.t().unsqueeze(0).expand(n_latent, M_int, N_int)
    B = torch.bmm(Yt, alpha)
    Om_inv_B = torch.cholesky_solve(B, L_Om, upper=False)
    tr = -0.5 * torch.sum(torch.diagonal(Om_inv_B, dim1=-2, dim2=-1), dim=1)
    diag_K = torch.diagonal(L_K, dim1=-2, dim2=-1)
    diag_Om = torch.diagonal(L_Om, dim1=-2, dim2=-1)
    slogdet_K = 2.0 * torch.sum(torch.log(diag_K), dim=1)
    slogdet_Om = 2.0 * torch.sum(torch.log(diag_Om), dim=1)
    log_lik = -0.5 * float(M_int) * slogdet_K - 0.5 * float(N_int) * slogdet_Om + tr
    return log_lik
")
}
}
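
# A minimal interactive sketch (not package code) of how the cached TorchScript
# functions can be called once the package is loaded, assuming torch is
# installed; NULL is passed for the Optional x_star argument:
#   x <- torch::torch_randn(10, 2)
#   thetas <- torch::torch_ones(1, 2)  # one draw of inverse squared lengthscales
#   tau <- torch::torch_ones(1)        # one draw of the inverse amplitude
#   K <- .shrinkGPR_internal$jit_funcs$kernel_se(thetas, tau, x, NULL)
#   K$shape  # 1 x 10 x 10: one covariance matrix per parameter draw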