# knitr setup: echo all code chunks and cache results so repeated
# renders do not re-run the (slow) benchmark chunks below.
knitr::opts_chunk$set(echo = TRUE, cache = TRUE)
This vignette gives users a rough idea of how long each function takes to run. The benchmarks were performed using the default R installation on Travis CI.
Note: the benchmarks run significantly faster on my personal machine (MacBook Pro, Late 2016); in most cases the processes take $\approx 25\%$ of the time reported here.
We will be estimating a tri-diagonal precision matrix with dimension $p = 100$:
\vspace{0.5cm}
# Load the packages used throughout the benchmarks.
library(SCPME)
library(microbenchmark)

# Simulate n = 1000 observations from a tri-diagonal (sparse)
# precision matrix with p = 100 variables.
data <- data_gen(p = 100, n = 1000, r = 5)

# Maximum-likelihood sample covariance matrix: rescale the unbiased
# cov() estimate (divides by n - 1) so it divides by n instead.
sample <- (nrow(data$X) - 1) / nrow(data$X) * cov(data$X)
\vspace{0.5cm}
\vspace{0.5cm}
# Benchmark shrink() at the default convergence tolerance (1e-4),
# fitting a single penalty value lam = 0.1.
microbenchmark(
  shrink(S = sample, crit.cv = "loglik", lam = 0.1,
         tol.abs = 1e-4, tol.rel = 1e-4, trace = "none")
)
\vspace{0.5cm}
\vspace{0.5cm}
# Benchmark shrink() again with a much tighter convergence
# tolerance (1e-8) to show the cost of extra ADMM iterations.
microbenchmark(
  shrink(S = sample, crit.cv = "loglik", lam = 0.1,
         tol.abs = 1e-8, tol.rel = 1e-8, trace = "none")
)
\vspace{0.5cm}
We will now benchmark `shrink` using cross validation to select the optimal tuning parameter `lam`:
\vspace{0.5cm}
# Benchmark cross validation over the default tuning-parameter grid.
# Only 5 repetitions because each CV fit is expensive.
microbenchmark(
  shrink(X = data$X, Y = data$Y, trace = "none"),
  times = 5
)
\vspace{0.5cm}
We will now benchmark parallel (`cores = 2`) cross validation:
\vspace{0.5cm}
# Benchmark the same cross validation run in parallel across 2 cores.
microbenchmark(
  shrink(X = data$X, Y = data$Y, cores = 2, trace = "none"),
  times = 5
)
\vspace{0.5cm}
\vspace{0.5cm}
# Benchmark shrink() while penalizing the regression coefficient
# matrix beta (B = cov(X, Y)). The largest penalty considered is
# max |X'Y|, which guarantees a fully shrunken solution at the top
# of the grid.
lam_max <- max(abs(crossprod(data$X, data$Y)))
microbenchmark(
  shrink(X = data$X, Y = data$Y, B = cov(data$X, data$Y),
         lam.max = lam_max, lam.min.ratio = 1e-4, trace = "none"),
  times = 5
)
\vspace{0.5cm}
\vspace{0.5cm}
# Benchmark shrink() while penalizing both beta and omega: augment
# B with an identity block so the precision matrix itself is also
# penalized. lam.max is fixed at 10 here.
microbenchmark(
  shrink(X = data$X, Y = data$Y,
         B = cbind(cov(data$X, data$Y), diag(ncol(data$X))),
         lam.max = 10, lam.min.ratio = 1e-4, trace = "none"),
  times = 5
)
Add the following code to your website.
For more information on customizing the embed code, read Embedding Snippets.