# Chunk options and session setup ------------------------------------
knitr::opts_chunk$set(
  comment = "#>",
  collapse = TRUE,
  out.width = "70%",
  fig.align = "center",
  fig.width = 6,
  fig.asp = .618
)
# Save the current "digits" option so it can be restored at the end.
orig_opts <- options("digits")
options(digits = 3)
# Fix the RNG seed so the simulation below is reproducible.
set.seed(1)
library(bvhar)
Given a VAR coefficient matrix and a VHAR coefficient matrix, respectively,
sim_var(num_sim, num_burn, var_coef, var_lag, sig_error, init)
generates a VAR process, and
sim_vhar(num_sim, num_burn, vhar_coef, sig_error, init)
generates a VHAR process. We use the coefficient matrix estimated by VAR(5) in the introduction vignette.
# Select four volatility-index series and split into train/test ------
etf_eval <- etf_vix |>
  dplyr::select(GVZCLS, OVXCLS, EVZCLS, VXFXICLS) |>
  divide_ts(20)
etf_train <- etf_eval$train # training set
etf_test <- etf_eval$test # test set
# Fit VAR(5); its coefficients drive the simulation below.
ex_fit <- var_lm(etf_train, p = 5)
Consider
# Estimated coefficient matrix and error covariance matrix
coef(ex_fit)
ex_fit$covmat
Then
m <- ncol(ex_fit$coefficients) # number of variables
# generate VAR(5) ----------------------------------------------------
y <- sim_var(
  num_sim = 1500,
  num_burn = 100,
  var_coef = coef(ex_fit),
  var_lag = 5L,
  sig_error = ex_fit$covmat,
  init = matrix(0L, nrow = 5L, ncol = m)
)
# column names: y1, y2, ... ------------------------------------------
colnames(y) <- paste0("y", seq_len(m))
head(y)
h <- 20 # forecast horizon
y_eval <- divide_ts(y, h) # split the simulated series
y_train <- y_eval$train # train
y_test <- y_eval$test # test
# Frequentist benchmark models ---------------------------------------
model_var <- var_lm(y_train, 5) # VAR(5)
model_vhar <- vhar_lm(y_train) # VHAR
Minnesota prior
# hyperparameters -----------------------------------------------------
y_sig <- apply(y_train, 2, sd) # sigma vector
y_lam <- .2 # lambda
y_delta <- rep(.2, m) # delta vector
eps <- 1e-04 # very small number
spec_bvar <- set_bvar(y_sig, y_lam, y_delta, eps)
# fit -----------------------------------------------------------------
model_bvar <- bvar_minnesota(y_train, p = 5, bayes_spec = spec_bvar)
BVHAR-S
# BVHAR-S specification (reuses the Minnesota hyperparameters above)
spec_bvhar_v1 <- set_bvhar(y_sig, y_lam, y_delta, eps)
# fit -----------------------------------------------------------------
model_bvhar_v1 <- bvhar_minnesota(y_train, bayes_spec = spec_bvhar_v1)
BVHAR-L
# weights -------------------------------------------------------------
y_day <- rep(.1, m)
y_week <- rep(.01, m)
y_month <- rep(.01, m)
# spec ----------------------------------------------------------------
spec_bvhar_v2 <- set_weight_bvhar(
  y_sig,
  y_lam,
  eps,
  y_day,
  y_week,
  y_month
)
# fit -----------------------------------------------------------------
model_bvhar_v2 <- bvhar_minnesota(y_train, bayes_spec = spec_bvhar_v2)
You can forecast using the predict()
method with the objects above.
Set the forecasting horizon with the n_ahead
argument.
The result of this forecast is an object of another class, predbvhar,
which provides several methods:
autoplot.predbvhar()
, mse.predbvhar()
, mae.predbvhar()
, mape.predbvhar()
, mase.predbvhar()
, mrae.predbvhar()
, relmae.predbvhar()
, rmape.predbvhar()
, rmase.predbvhar()
, rmsfe.predbvhar()
, rmafe.predbvhar()
# h-step-ahead forecast from the VAR(5) fit (printed on its own line)
pred_var <- predict(model_var, n_ahead = h)
pred_var
# Inspect the returned object: its class and components
class(pred_var)
names(pred_var)
The package provides evaluation functions such as
mse(predbvhar, test)
: MSE, and
mape(predbvhar, test)
: MAPE.
(mse_var <- mse(pred_var, y_test))
# h-step-ahead forecast from the VHAR fit (printed)
pred_vhar <- predict(model_vhar, n_ahead = h)
pred_vhar
MSE:
# MSE of the VHAR forecast against the test set (printed)
mse_vhar <- mse(pred_vhar, y_test)
mse_vhar
# h-step-ahead forecast from the BVAR fit (printed)
pred_bvar <- predict(model_bvar, n_ahead = h)
pred_bvar
MSE:
# MSE of the BVAR forecast against the test set (printed)
mse_bvar <- mse(pred_bvar, y_test)
mse_bvar
# h-step-ahead forecast from the BVHAR-S fit (printed)
pred_bvhar_v1 <- predict(model_bvhar_v1, n_ahead = h)
pred_bvhar_v1
MSE:
# MSE of the BVHAR-S forecast against the test set (printed)
mse_bvhar_v1 <- mse(pred_bvhar_v1, y_test)
mse_bvhar_v1
# h-step-ahead forecast from the BVHAR-L fit (printed)
pred_bvhar_v2 <- predict(model_bvhar_v2, n_ahead = h)
pred_bvhar_v2
MSE:
# MSE of the BVHAR-L forecast against the test set (printed)
mse_bvhar_v2 <- mse(pred_bvhar_v2, y_test)
mse_bvhar_v2
autoplot(predbvhar)
and autolayer(predbvhar)
draw the forecasting results.
# Overlay all five forecasts and the test-set values on one plot
autoplot(pred_var, x_cut = 1470, ci_alpha = .7, type = "wrap") +
  autolayer(pred_vhar, ci_alpha = .5) +
  autolayer(pred_bvar, ci_alpha = .4) +
  autolayer(pred_bvhar_v1, ci_alpha = .2) +
  autolayer(pred_bvhar_v2, ci_alpha = .1) +
  geom_eval(y_test, colour = "#000000", alpha = .5)
Mean of MSE
# Average MSE across variables for each model, sorted ascending
list(
  VAR = mse_var,
  VHAR = mse_vhar,
  BVAR = mse_bvar,
  BVHAR1 = mse_bvhar_v1,
  BVHAR2 = mse_bvhar_v2
) |>
  vapply(mean, numeric(1)) |>
  sort()
For each variable, we can see the error with plot.
# Per-variable error plot for every fitted model
list(
  pred_var,
  pred_vhar,
  pred_bvar,
  pred_bvhar_v1,
  pred_bvhar_v2
) |>
  gg_loss(y = y_test, "mse")
Relative MAPE (RMAPE), benchmark model: VAR
# Relative MAPE of each model against the VAR benchmark
list(
  VAR = pred_var,
  VHAR = pred_vhar,
  BVAR = pred_bvar,
  BVHAR1 = pred_bvhar_v1,
  BVHAR2 = pred_bvhar_v2
) |>
  lapply(rmape, pred_bench = pred_var, y = y_test) |>
  unlist()
In time series research, out-of-sample forecasting plays a key role. So, we provide two out-of-sample forecasting functions:
forecast_roll(object, n_ahead, y_test)
and forecast_expand(object, n_ahead, y_test)
forecast_roll(object, n_ahead, y_test)
conducts h >= 1 step rolling-window forecasting.
It fixes the window size and moves the window forward; the window is the training set. In this package, the window size equals the original input data.
The step is iterated
num_test - h + 1
times. 5-step out-of-sample forecasting:
# 5-step rolling-window out-of-sample forecast for VAR (printed)
var_roll <- forecast_roll(model_var, 5, y_test)
var_roll
Note that the number of rows is the longest forecast horizon.
# Class and components of the rolling-forecast object
class(var_roll)
names(var_roll)
To apply the same evaluation methods, a class named bvharcv
has been defined. You can use the functions above.
# Rolling-window forecasts for the remaining models
vhar_roll <- forecast_roll(model_vhar, 5, y_test)
bvar_roll <- forecast_roll(model_bvar, 5, y_test)
bvhar_roll_v1 <- forecast_roll(model_bvhar_v1, 5, y_test)
bvhar_roll_v2 <- forecast_roll(model_bvhar_v2, 5, y_test)
Relative MAPE, benchmark model: VAR
# Relative MAPE of each rolling forecast against the VAR benchmark
list(
  VAR = var_roll,
  VHAR = vhar_roll,
  BVAR = bvar_roll,
  BVHAR1 = bvhar_roll_v1,
  BVHAR2 = bvhar_roll_v2
) |>
  lapply(rmape, pred_bench = var_roll, y = y_test) |>
  unlist()
forecast_expand(object, n_ahead, y_test)
conducts h >= 1 step expanding window forecasting.
Unlike rolling windows, the expanding-window method fixes the starting point of the training set; everything else is the same.
# 5-step expanding-window out-of-sample forecast for VAR (printed)
var_expand <- forecast_expand(model_var, 5, y_test)
var_expand
The class is bvharcv
.
# Class and components of the expanding-forecast object
class(var_expand)
names(var_expand)
# Expanding-window forecasts for the remaining models
vhar_expand <- forecast_expand(model_vhar, 5, y_test)
bvar_expand <- forecast_expand(model_bvar, 5, y_test)
bvhar_expand_v1 <- forecast_expand(model_bvhar_v1, 5, y_test)
bvhar_expand_v2 <- forecast_expand(model_bvhar_v2, 5, y_test)
Relative MAPE, benchmark model: VAR
# Relative MAPE of each expanding forecast against the VAR benchmark
list(
  VAR = var_expand,
  VHAR = vhar_expand,
  BVAR = bvar_expand,
  BVHAR1 = bvhar_expand_v1,
  BVHAR2 = bvhar_expand_v2
) |>
  lapply(rmape, pred_bench = var_expand, y = y_test) |>
  unlist()
options(orig_opts) # restore the "digits" option saved at the top of the vignette
Any scripts or data that you put into this service are public.
Add the following code to your website.
For more information on customizing the embed code, read Embedding Snippets.