# inst/doc/Adding_New_Methods.R

## ----include = FALSE----------------------------------------------------------
# Configure knitr chunk options for rendering this vignette:
# collapse each chunk's source and output into a single block,
# and prefix printed output lines with "#>".
knitr::opts_chunk$set(
  comment  = "#>",
  collapse = TRUE
)

## ----eval=FALSE---------------------------------------------------------------
# #' @title PET (Precision-Effect Test) Method
# #'
# #' @description
# #' Implements the Precision-Effect Test for publication bias correction.
# #' PET regresses effect sizes against standard errors to test for and correct
# #' publication bias. The intercept represents the bias-corrected effect size
# #' estimate.
# #'
# #' @param method_name Method name (automatically passed)
# #' @param data Data frame with yi (effect sizes) and sei (standard errors)
# #' @param settings List of method settings
# #'
# #' @return Data frame with PET results
# #'
# #' @export
# method.PET <- function(method_name, data, settings = NULL) {
# 
#   # Extract data
#   effect_sizes    <- data$yi
#   standard_errors <- data$sei
# 
#   # Input validation and error handling
#   if (length(effect_sizes) < 3)
#     stop("At least 3 estimates required for PET analysis", call. = FALSE)
# 
#   if (stats::var(standard_errors) <= 0)
#     stop("No variance in standard errors", call. = FALSE)
# 
#   # Implement the statistical method
#   pet_model <- stats::lm(effect_sizes ~ standard_errors,
#                         weights = 1/standard_errors^2)
# 
#   # Extract and process results
#   coefficients    <- stats::coef(pet_model)
#   se_coefficients <- summary(pet_model)$coefficients[, "Std. Error"]
#   p_values        <- summary(pet_model)$coefficients[, "Pr(>|t|)"]
# 
#   # Main estimates
#   estimate    <- coefficients[1]  # Intercept = bias-corrected effect
#   estimate_se <- se_coefficients[1]
#   estimate_p  <- p_values[1]
# 
#   # Additional method-specific results
#   bias_coefficient <- coefficients[2]
#   bias_p_value     <- p_values[2]
# 
#   # Calculate confidence intervals
#   estimate_lci <- estimate - 1.96 * estimate_se
#   estimate_uci <- estimate + 1.96 * estimate_se
# 
#   # Return standardized results
#   return(data.frame(
#     method         = method_name,
#     estimate       = estimate,
#     standard_error = estimate_se,
#     ci_lower       = estimate_lci,
#     ci_upper       = estimate_uci,
#     p_value        = estimate_p,
#     BF             = NA,
#     convergence    = TRUE,
#     note           = NA,
#     # Method-specific columns
#     bias_coefficient = bias_coefficient,
#     bias_p_value     = bias_p_value
#   ))
# }

## ----eval=FALSE---------------------------------------------------------------
# #' @export
# method_settings.PET <- function(method_name) {
# 
#   settings <- list(
#     "default" = list() # PET has no configurable settings
#   )
# 
#   return(settings)
# }

## ----eval=FALSE---------------------------------------------------------------
# # Example with multiple settings (from RMA method)
# method_settings.RMA <- function(method_name) {
# 
#   settings <- list(
#     "default" = list(
#       method = "REML",
#       test.uni = "knha",
#       test.mv = "t",
#       control = list(stepadj = 0.5, maxiter = 500)
#     )
#   )
# 
#   return(settings)
# }

## ----eval=FALSE---------------------------------------------------------------
# #' @export
# method_extra_columns.PET <- function(method_name) {
#   c("bias_coefficient", "bias_p_value")
# }

## ----eval=FALSE---------------------------------------------------------------
# # Create example data
# data <- data.frame(
#   yi  = c(0.2, 0.3, 0.1, 0.4, 0.25),
#   sei = c(0.1, 0.15, 0.08, 0.12, 0.09)
# )
# 
# # Run your method
# result <- run_method("PET", data)
# print(result)
# 
# # Use specific settings (if available)
# result <- run_method("PET", data, "default")

# Try the PublicationBiasBenchmark package in your browser
#
# Any scripts or data that you put into this service are public.
#
# PublicationBiasBenchmark documentation built on March 16, 2026, 5:07 p.m.