#' SVEM Significance Test with Mixture Support
#'
#' Performs a whole-model significance test using the SVEM framework and allows
#' the user to specify mixture factor groups. Mixture factors are sets of
#' continuous variables that are constrained to sum to a constant (the
#' mixture total) and have optional lower and upper bounds. When mixture
#' groups are supplied, the grid of evaluation points is generated by
#' sampling Dirichlet variates over the mixture simplex rather than by
#' independently sampling each continuous predictor. Non-mixture
#' continuous predictors are sampled via a maximin Latin hypercube over
#' their observed ranges, and categorical predictors are sampled from
#' their observed levels.
#'
#' @param formula A formula specifying the model to be tested.
#' @param data A data frame containing the variables in the model.
#' @param mixture_groups Optional list describing one or more mixture factor
#' groups. Each element of the list should be a list with components
#' \code{vars} (character vector of column names), \code{lower} (numeric vector of
#' lower bounds of the same length as \code{vars}), \code{upper} (numeric vector
#' of upper bounds of the same length), and \code{total} (scalar specifying the
#' sum of the mixture variables). All mixture variables must be
#' included in \code{vars}, and no variable can appear in more than one
#' mixture group. Defaults to \code{NULL} (no mixtures).
#' @param nPoint Number of random points in the factor space (default: 2000).
#' @param nSVEM Number of SVEM fits on the original data (default: 10).
#' @param nPerm Number of SVEM fits on permuted responses for the reference
#' distribution (default: 150).
#' @param percent Percentage of variance to capture in the SVD (default: 90).
#' @param nBoot Number of bootstrap iterations within each SVEM fit (default: 100).
#' @param glmnet_alpha The alpha parameter(s) for glmnet (default: \code{c(1)}).
#' @param weight_scheme Weighting scheme for SVEM (default: "SVEM").
#' @param objective Objective used inside \code{SVEMnet()} to pick the bootstrap
#' path solution. One of \code{"auto"}, \code{"wAIC"}, \code{"wBIC"},
#' \code{"wGIC"}, \code{"wSSE"} (default: \code{"auto"}).
#' @param auto_ratio_cutoff Single cutoff for the automatic rule when
#' \code{objective = "auto"} (default 1.3). With \code{r = n_X/p_X}, wAIC is
#' used when \code{r >= auto_ratio_cutoff} and wBIC otherwise. Passed to
#' \code{SVEMnet()}.
#' @param gamma Penalty weight used only when \code{objective = "wGIC"} (default 2).
#' Passed to \code{SVEMnet()}.
#' @param relaxed Logical; default \code{FALSE}. When \code{TRUE}, inner
#' \code{SVEMnet()} fits use glmnet's relaxed elastic net path and select both
#' lambda and relaxed gamma on each bootstrap. When \code{FALSE}, the standard
#' glmnet path is used. This value is passed through to \code{SVEMnet()}.
#' Note: if \code{relaxed = TRUE} and \code{glmnet_alpha} includes \code{0}, ridge
#' (\code{alpha = 0}) is dropped by \code{SVEMnet()} for relaxed fits.
#' @param verbose Logical; if \code{TRUE}, displays progress messages (default: \code{TRUE}).
#' @param ... Additional arguments passed to \code{SVEMnet()} and then to \code{glmnet()}
#' (for example: \code{penalty.factor}, \code{offset}, \code{lower.limits},
#' \code{upper.limits}, \code{standardize.response}, etc.). The \code{relaxed}
#' setting is controlled by the \code{relaxed} argument of this function and
#' any \code{relaxed} value passed via \code{...} is ignored with a warning.
#'
#' @return A list of class \code{svem_significance_test} containing:
#' \itemize{
#' \item \code{p_value}: median p-value across the original SVEM fits.
#' \item \code{p_values}: vector of p-values, one per original SVEM fit.
#' \item \code{d_Y}: distances for original fits.
#' \item \code{d_pi_Y}: distances for permutation fits.
#' \item \code{distribution_fit}: fitted SHASHo distribution object.
#' \item \code{data_d}: data frame combining distances and labels.
#' }
#'
#' @details
#' If no mixture groups are supplied, this function behaves identically
#' to a standard SVEM-based whole-model test, sampling non-mixture continuous
#' variables via a maximin Latin hypercube within their observed ranges,
#' and categorical variables from their observed levels.
#'
#' Internally, predictions at evaluation points use \code{predict.svem_model()}
#' with \code{se.fit = TRUE}. Evaluation points with unseen categorical levels
#' predict as \code{NA}; any fit whose predictions contain \code{NA} is dropped
#' from the distance summaries via \code{complete.cases()}.
#'
#' @seealso \code{SVEMnet()}, \code{predict.svem_model()}
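#' @examples
#' \dontrun{
#' ## Illustrative sketch only: simulated data with a three-component
#' ## mixture (A, B, C summing to 1) plus one process variable. All names,
#' ## coefficients, and settings here are hypothetical.
#' set.seed(1)
#' n <- 40
#' raw <- matrix(rgamma(n * 3, shape = 1), ncol = 3)
#' mix <- raw / rowSums(raw)
#' dat <- data.frame(A = mix[, 1], B = mix[, 2], C = mix[, 3],
#' Temp = runif(n, 20, 80))
#' dat$y <- 2 + 3 * dat$A - dat$B + 0.02 * dat$Temp + rnorm(n, sd = 0.2)
#' mix_spec <- list(list(vars = c("A", "B", "C"),
#' lower = c(0, 0, 0),
#' upper = c(1, 1, 1),
#' total = 1))
#' res <- svem_significance_test(y ~ (A + B + C + Temp)^2, data = dat,
#' mixture_groups = mix_spec,
#' nPoint = 500, nSVEM = 5, nPerm = 50,
#' nBoot = 30)
#' res$p_value
#' }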
#' @importFrom stats model.frame model.response model.matrix terms delete.response
#' @importFrom stats rgamma sd coef predict median complete.cases
#' @export
svem_significance_test <- function(formula, data, mixture_groups = NULL,
nPoint = 2000, nSVEM = 10, nPerm = 150,
percent = 90, nBoot = 100,
glmnet_alpha = c(1),
weight_scheme = c("SVEM"),
objective = c("auto", "wAIC", "wBIC", "wGIC", "wSSE"),
auto_ratio_cutoff = 1.3,
gamma = 2,
relaxed = FALSE,
verbose = TRUE, ...) {
# Dependencies used via :: must be available
if (!requireNamespace("lhs", quietly = TRUE)) {
stop("Package 'lhs' not found. Please install it to use svem_significance_test().")
}
if (!requireNamespace("gamlss", quietly = TRUE) ||
!requireNamespace("gamlss.dist", quietly = TRUE)) {
stop("Packages 'gamlss' and 'gamlss.dist' are required. Please install them.")
}
objective <- match.arg(objective)
weight_scheme <- match.arg(weight_scheme)
data <- as.data.frame(data)
# Sanitize ... so our explicit 'relaxed' cannot be double-specified
dots <- list(...)
if ("relaxed" %in% names(dots)) {
warning("Ignoring 'relaxed' in '...'; use the 'relaxed' argument of svem_significance_test().")
dots$relaxed <- NULL
}
# Training design summary (ranges/levels)
mf <- stats::model.frame(formula, data)
y <- stats::model.response(mf)
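# Build the full design matrix up front to validate that the formula
# expands cleanly on 'data'; X itself is not reused below.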
X <- stats::model.matrix(formula, mf)
intercept_col <- which(colnames(X) == "(Intercept)")
if (length(intercept_col) > 0) X <- X[, -intercept_col, drop = FALSE]
predictor_vars <- base::all.vars(stats::delete.response(stats::terms(formula, data = data)))
# Classify predictors robustly (handles ordered factors and columns with
# multiple classes, where sapply(..., class) would misbehave).
is_categorical <- vapply(data[predictor_vars],
function(col) is.factor(col) || is.character(col),
logical(1))
continuous_vars <- predictor_vars[!is_categorical]
categorical_vars <- predictor_vars[is_categorical]
# Identify mixture vars
mixture_vars <- character(0)
if (!is.null(mixture_groups)) {
for (grp in mixture_groups) mixture_vars <- c(mixture_vars, grp$vars)
if (any(duplicated(mixture_vars))) {
dups <- unique(mixture_vars[duplicated(mixture_vars)])
stop("Mixture variables appear in multiple groups: ", paste(dups, collapse = ", "))
}
}
nonmix_continuous_vars <- setdiff(continuous_vars, mixture_vars)
# Non-mixture continuous via maximin LHS over observed ranges
if (length(nonmix_continuous_vars) > 0) {
ranges <- sapply(data[nonmix_continuous_vars], function(col) range(col, na.rm = TRUE))
T_continuous_raw <- as.matrix(lhs::maximinLHS(nPoint, length(nonmix_continuous_vars)))
T_continuous <- matrix(NA_real_, nrow = nPoint, ncol = length(nonmix_continuous_vars))
colnames(T_continuous) <- nonmix_continuous_vars
for (i in seq_along(nonmix_continuous_vars)) {
T_continuous[, i] <- T_continuous_raw[, i] * (ranges[2, i] - ranges[1, i]) + ranges[1, i]
}
T_continuous <- as.data.frame(T_continuous)
} else {
T_continuous <- NULL
}
# Mixture sampling with truncation
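# Strategy: draw Dirichlet(alpha) weights w, map them onto the shifted
# simplex via x = lower + (total - sum(lower)) * w (so the lower bounds and
# sum constraint hold by construction), and reject any candidate row that
# violates an upper bound.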
.sample_trunc_dirichlet <- function(n, lower, upper, total,
alpha = NULL, oversample = 4L, max_tries = 10000L) {
k <- length(lower)
if (length(upper) != k) stop("upper must have the same length as lower.")
if (is.null(alpha)) alpha <- rep(1, k)
min_sum <- sum(lower); max_sum <- sum(upper)
if (total < min_sum - 1e-12 || total > max_sum + 1e-12) {
stop("Infeasible mixture constraints: need sum(lower) <= total <= sum(upper).")
}
avail <- total - min_sum
if (avail <= 1e-12) {
return(matrix(rep(lower, each = n), nrow = n))
}
res <- matrix(NA_real_, nrow = n, ncol = k)
filled <- 0L; tries <- 0L
while (filled < n && tries < max_tries) {
m <- max(oversample * (n - filled), 1L)
g <- matrix(stats::rgamma(m * k, shape = alpha, rate = 1), ncol = k, byrow = TRUE)
W <- g / rowSums(g)
cand <- matrix(lower, nrow = m, ncol = k, byrow = TRUE) + avail * W
ok <- cand <= matrix(upper, nrow = m, ncol = k, byrow = TRUE)
ok <- rowSums(ok) == k
if (any(ok)) {
keep <- which(ok)
take <- min(length(keep), n - filled)
res[(filled + 1):(filled + take), ] <- cand[keep[seq_len(take)], , drop = FALSE]
filled <- filled + take
}
tries <- tries + 1L
}
if (filled < n) {
stop("Could not sample enough feasible mixture points within max_tries. ",
"Try relaxing upper bounds or increasing 'oversample'/'max_tries'.")
}
res
}
T_mixture <- NULL
if (!is.null(mixture_groups)) {
mix_all_vars <- unlist(lapply(mixture_groups, `[[`, "vars"))
T_mixture <- matrix(NA_real_, nrow = nPoint, ncol = length(mix_all_vars))
colnames(T_mixture) <- mix_all_vars
for (grp in mixture_groups) {
vars <- grp$vars
k <- length(vars)
lower <- if (!is.null(grp$lower)) grp$lower else rep(0, k)
upper <- if (!is.null(grp$upper)) grp$upper else rep(1, k)
total <- if (!is.null(grp$total)) grp$total else 1
if (length(lower) != k || length(upper) != k) {
stop("lower and upper must each have length equal to the number of mixture variables (",
paste(vars, collapse = ","), ").")
}
vals <- .sample_trunc_dirichlet(nPoint, lower, upper, total)
colnames(vals) <- vars
T_mixture[, vars] <- vals
}
T_mixture <- as.data.frame(T_mixture)
}
# Categorical sampling (use observed levels; keep training levels attribute for factors)
T_categorical <- NULL
if (length(categorical_vars) > 0) {
T_categorical <- vector("list", length(categorical_vars))
names(T_categorical) <- categorical_vars
for (v in categorical_vars) {
x <- data[[v]]
if (is.factor(x)) {
obs_lev <- levels(base::droplevels(x))
T_categorical[[v]] <- factor(
sample(obs_lev, nPoint, replace = TRUE),
levels = levels(x)
)
} else {
obs_lev <- sort(unique(as.character(x)))
T_categorical[[v]] <- factor(
sample(obs_lev, nPoint, replace = TRUE),
levels = obs_lev
)
}
}
T_categorical <- as.data.frame(T_categorical, stringsAsFactors = FALSE)
}
parts <- list(T_continuous, T_mixture, T_categorical)
parts <- parts[!vapply(parts, is.null, logical(1))]
if (length(parts) == 0) stop("No predictors provided.")
T_data <- do.call(cbind, parts)
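# T_data is the common nPoint-row evaluation grid combining the LHS,
# mixture, and categorical samples; every SVEM fit below is scored on it.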
y_mean <- mean(y)
M_Y <- matrix(NA_real_, nrow = nSVEM, ncol = nPoint)
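# Each row of M_Y will hold one fit's standardized deviations
# h = (f_hat - mean(y)) / se evaluated at all nPoint grid points.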
if (isTRUE(verbose)) message("Fitting SVEM models to original data with mixture handling...")
for (i in seq_len(nSVEM)) {
svem_model <- tryCatch({
do.call(SVEMnet, c(list(
formula = formula, data = data, nBoot = nBoot, glmnet_alpha = glmnet_alpha,
weight_scheme = weight_scheme, objective = objective,
auto_ratio_cutoff = auto_ratio_cutoff, gamma = gamma,
relaxed = relaxed
), dots))
}, error = function(e) {
message("Error in SVEMnet during SVEM fitting: ", e$message)
NULL
})
if (is.null(svem_model)) next
pred_res <- predict(svem_model, newdata = T_data, debias = FALSE, se.fit = TRUE)
f_hat_Y_T <- pred_res$fit
s_hat_Y_T <- pred_res$se.fit
s_hat_Y_T[s_hat_Y_T == 0] <- 1e-6
h_Y <- (f_hat_Y_T - y_mean) / s_hat_Y_T
M_Y[i, ] <- h_Y
}
M_pi_Y <- matrix(NA_real_, nrow = nPerm, ncol = nPoint)
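# Repeat the same fitting/prediction procedure on permuted responses to
# build the null reference distribution of the distance statistic.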
if (isTRUE(verbose)) message("Starting permutation testing...")
start_time_perm <- Sys.time()
for (j in seq_len(nPerm)) {
y_perm <- sample(y, replace = FALSE)
data_perm <- data
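# Assumes the response in 'formula' is a bare column name (e.g., y ~ ...),
# so the permuted values can be assigned back by name.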
data_perm[[as.character(formula[[2]])]] <- y_perm
svem_model_perm <- tryCatch({
do.call(SVEMnet, c(list(
formula = formula, data = data_perm, nBoot = nBoot, glmnet_alpha = glmnet_alpha,
weight_scheme = weight_scheme, objective = objective,
auto_ratio_cutoff = auto_ratio_cutoff, gamma = gamma,
relaxed = relaxed
), dots))
}, error = function(e) {
message("Error in SVEMnet during permutation fitting: ", e$message)
NULL
})
if (is.null(svem_model_perm)) next
pred_res <- predict(svem_model_perm, newdata = T_data, debias = FALSE, se.fit = TRUE)
f_hat_piY_T <- pred_res$fit
s_hat_piY_T <- pred_res$se.fit
s_hat_piY_T[s_hat_piY_T == 0] <- 1e-6
h_piY <- (f_hat_piY_T - y_mean) / s_hat_piY_T
M_pi_Y[j, ] <- h_piY
if (isTRUE(verbose) && (j %% 10 == 0 || j == nPerm)) {
elapsed_time <- Sys.time() - start_time_perm
elapsed_secs <- as.numeric(elapsed_time, units = "secs")
estimated_total_secs <- (elapsed_secs / j) * nPerm
remaining_secs <- estimated_total_secs - elapsed_secs
remaining_time_formatted <- sprintf("%02d:%02d:%02d",
floor(remaining_secs / 3600),
floor((remaining_secs %% 3600) / 60),
floor(remaining_secs %% 60))
message(sprintf("Permutation %d/%d completed. Estimated time remaining: %s",
j, nPerm, remaining_time_formatted))
}
}
M_Y <- M_Y[stats::complete.cases(M_Y), , drop = FALSE]
M_pi_Y <- M_pi_Y[stats::complete.cases(M_pi_Y), , drop = FALSE]
if (nrow(M_Y) == 0) stop("All SVEM fits on the original data failed.")
if (nrow(M_pi_Y) == 0) stop("All SVEM fits on permuted data failed.")
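# Standardize both matrices by the permutation columns' means and SDs, then
# measure each row's reduced-rank Mahalanobis distance in the span of the
# leading right singular vectors of the permutation matrix.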
col_means_M_pi_Y <- colMeans(M_pi_Y)
col_sds_M_pi_Y <- apply(M_pi_Y, 2, sd)
col_sds_M_pi_Y[col_sds_M_pi_Y == 0] <- 1e-6
tilde_M_pi_Y <- scale(M_pi_Y, center = col_means_M_pi_Y, scale = col_sds_M_pi_Y)
M_Y_centered <- sweep(M_Y, 2, col_means_M_pi_Y, "-")
tilde_M_Y <- sweep(M_Y_centered, 2, col_sds_M_pi_Y, "/")
svd_res <- svd(tilde_M_pi_Y)
V <- svd_res$v; s <- svd_res$d
evalues_temp <- s^2
evalues_temp <- evalues_temp / sum(evalues_temp) * ncol(tilde_M_pi_Y)
cumsum_evalues <- cumsum(evalues_temp) / sum(evalues_temp) * 100
k_idx <- which(cumsum_evalues >= percent)[1]
if (is.na(k_idx)) k_idx <- length(evalues_temp)
evalues <- evalues_temp[1:k_idx]
evectors <- V[, 1:k_idx, drop = FALSE]
# Guard diag() with nrow so a single retained component does not trigger
# diag(scalar)'s identity-matrix behavior.
inv_eval <- diag(1 / evalues, nrow = length(evalues))
scores_perm <- tilde_M_pi_Y %*% evectors
T2_perm <- rowSums((scores_perm %*% inv_eval) * scores_perm)
d_pi_Y <- sqrt(T2_perm)
scores_Y <- tilde_M_Y %*% evectors
T2_Y <- rowSums((scores_Y %*% inv_eval) * scores_Y)
d_Y <- sqrt(T2_Y)
if (length(d_pi_Y) == 0) stop("No valid permutation distances to fit a distribution.")
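# Fit a four-parameter SHASHo distribution to the permutation distances.
# sigma and tau use log links, so their coefficients are exponentiated
# when recovered below.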
suppressMessages({
distribution_fit <- tryCatch({
gamlss::gamlss(
d_pi_Y ~ 1,
family = gamlss.dist::SHASHo(mu.link = "identity", sigma.link = "log",
nu.link = "identity", tau.link = "log"),
control = gamlss::gamlss.control(n.cyc = 1000, trace = FALSE)
)
}, error = function(e) {
message("Error in fitting SHASHo distribution: ", e$message)
NULL
})
})
if (is.null(distribution_fit)) stop("Failed to fit SHASHo distribution.")
mu <- as.numeric(stats::coef(distribution_fit, what = "mu"))
sigma <- exp(as.numeric(stats::coef(distribution_fit, what = "sigma")))
nu <- as.numeric(stats::coef(distribution_fit, what = "nu"))
tau <- exp(as.numeric(stats::coef(distribution_fit, what = "tau")))
p_values <- 1 - gamlss.dist::pSHASHo(d_Y, mu = mu, sigma = sigma, nu = nu, tau = tau)
p_value <- stats::median(p_values)
response_name <- as.character(formula[[2]])
data_d <- data.frame(
D = c(d_Y, d_pi_Y),
Source_Type = c(rep("Original", length(d_Y)), rep("Permutation", length(d_pi_Y))),
Response = response_name
)
results_list <- list(
p_value = p_value,
p_values = p_values,
d_Y = d_Y,
d_pi_Y = d_pi_Y,
distribution_fit = distribution_fit,
data_d = data_d
)
class(results_list) <- "svem_significance_test"
results_list
}