knitr::opts_chunk$set(
  fig.retina = 3,
  fig.align = "center",
  fig.width = 6.93,
  fig.height = 6.13,
  out.width = "70%",
  echo = FALSE
)

options(
  scipen = 999
)

R.utils::sourceDirectory("R")
library("drake")

# load drake objects
loadd(
  bm_aggregated,

  # for T4
  benchmark_tune_results_hr_nri_vi
)

library("xtable")
library("flextable")
library("ggbeeswarm")
library("ggsci")
library("ggrepel")
library("ggpubr")
library("here")
library("mlr")
library("dplyr")
library("forcats")

Last update:

date()
df_perf <- getBMRPerformances(bm_aggregated, as.df = TRUE) %>%
  mutate(task.id = recode_factor(task.id,
    `hr` = "HR",
    `vi` = "VI",
    `nri` = "NRI",
    `nri_vi` = "NRI-VI",
    `hr_nri` = "HR-NRI",
    `hr_vi` = "HR-VI",
    `hr_nri_vi` = "HR-NRI-VI"
  )) %>%
  tidyr::separate(learner.id, c("learner_group", "filter"),
    remove = FALSE,
    sep = " MBO ",
    fill = "right"
  ) %>%
  mutate(learner_group = recode_factor(learner_group,
    `XGBOOST` = "XGBoost"
  ))
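As a toy illustration (hypothetical input, not project data) of the separate() call above: learner IDs are split at " MBO ", and IDs without that separator get filter = NA via fill = "right".

tidyr::separate(
  tibble::tibble(learner.id = c("SVM MBO Borda", "regr.featureless")),
  learner.id, c("learner_group", "filter"),
  sep = " MBO ", fill = "right"
)
# -> learner_group: "SVM", "regr.featureless"; filter: "Borda", NA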
# Aggregate performances and add a standard error column (the SD of the fold RMSEs).
df_perf <- df_perf %>%
  dplyr::group_by(task.id, learner.id, filter) %>%
  dplyr::mutate(rmse_aggr = round(mean(rmse), 3)) %>%
  dplyr::mutate(se = round(sd(rmse), 3)) %>%
  dplyr::select(-rmse) %>%
  dplyr::ungroup()

Fold performances of "SVM MBO No Filter" on the HR Task

df <- mlr::getBMRPerformances(bm_aggregated,
  "hr", "SVM MBO No Filter",
  as.df = TRUE
)
df_tab <- df %>%
  dplyr::select(iter, rmse) %>%
  dplyr::rename(Plot = iter, RMSE = rmse) %>%
  dplyr::mutate(`Test Plot` = as.character(Plot)) %>%
  dplyr::mutate(`Test Plot` = forcats::fct_recode(`Test Plot`,
    Laukiz1 = "1", Laukiz2 = "2",
    Luiando = "3", Oiartzun = "4"
  )) %>%
  dplyr::select(-Plot) %>%
  dplyr::mutate(RMSE = round(RMSE, 2))

df_tab %>%
  xtable::xtable(
    type = "latex",
    caption = "Test fold performances in RMSE (p.p.) for learner SVM on the HR dataset without using a filter, showcasing performance variance at the fold level. For each row, the model was trained on observations from all other plots and tested on the observations of the given plot.",
    label = "tab:svm-single-fold-perf"
  ) %>%
  print(
    file = here::here("docs/00-manuscripts/mdpi/performance-svm-single-plot.tex"),
    include.rownames = TRUE,
    latex.environments = c("center"),
    table.placement = "ht!",
    caption.placement = "top",
    timestamp = NULL
  )

saveRDS(df_tab, here("docs/00-manuscripts/presentation/table-svm-single-plot.rda"))

DT::datatable(df_tab)

(Table) T1 All learner/filter/task combinations ordered by performance.

Overall leaderboard across all settings, sorted ascending by performance.

table1 <- df_perf %>%
  group_by(learner.id, task.id, filter) %>%
  slice(which.min(rmse_aggr)) %>%
  dplyr::rename(
    "Model" = learner_group,
    "Learner ID" = learner.id,
    "Task" = task.id,
    "Filter" = filter,
    "RMSE" = rmse_aggr,
    "SE" = se
  ) %>%
  ungroup() %>%
  mutate(Filter = replace(Filter, is.na(Filter), "No Filter")) %>%
  select(-iter, -`Learner ID`, -rsq) %>%
  relocate(RMSE, .after = Filter) %>%
  relocate(`SE`, .after = RMSE) %>%
  mutate(RMSE = round(RMSE, 3)) %>%
  arrange(RMSE)

# save as latex table
table1 %>%
  ungroup() %>%
  arrange(RMSE) %>%
  slice(1:10) %>%
  xtable(
    type = "latex",
    caption = "Best ten results among all learner-task-filter combinations, sorted in increasing order of RMSE (p.p.), with their respective standard error (SE).",
    label = "tab:perf-top-10",
    digits = 3
  ) %>%
  print(
    file = here("docs/00-manuscripts/mdpi/performance-top-10.tex"),
    include.rownames = TRUE,
    latex.environments = c("center"),
    table.placement = "ht!",
    caption.placement = "top",
    timestamp = NULL,
    sanitize.text.function = function(x) {
      x
    }
  )

saveRDS(table1, here("docs/00-manuscripts/presentation/table-perf-top-10.rda"))

DT::datatable(table1)

(Table) T2 Best learner/filter/task combination

Learners: on which task and using which filter did each learner achieve its best result?

*-CV: L2 penalized regression using the internal 10-fold CV tuning of the glmnet package.

*-MBO: L2 penalized regression using MBO for hyperparameter optimization.
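As a minimal sketch of the distinction (toy data, not the project setup): the "*-CV" variant lets glmnet pick the penalty via its built-in cross-validation, while the "*-MBO" variant tunes the penalty externally via MBO.

library("glmnet")
set.seed(1)
x <- matrix(rnorm(100 * 20), 100, 20)
y <- rnorm(100)
# alpha = 0 -> ridge (L2 penalty); nfolds = 10 mirrors the internal 10-fold CV
fit_cv <- cv.glmnet(x, y, alpha = 0, nfolds = 10)
fit_cv$lambda.min # penalty selected by the internal CV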

table2 <- df_perf %>%
  group_by(learner_group) %>%
  slice(which.min(rmse_aggr)) %>%
  mutate(filter = replace(filter, is.na(filter), "No Filter")) %>%
  arrange(rmse_aggr) %>%
  # remove Task info for featureless.learner
  dplyr::mutate(task.id = case_when(learner.id == "regr.featureless" ~ "-", TRUE ~ as.character(task.id))) %>%
  dplyr::rename(
    "Model" = learner_group,
    "Learner ID" = learner.id,
    "Task" = task.id,
    "Filter" = filter,
    "RMSE" = rmse_aggr,
    "SE" = se
  ) %>%
  select(-`Learner ID`, -rsq) %>%
  relocate(RMSE, .after = Filter) %>%
  relocate(`SE`, .after = RMSE) %>%
  select(-iter)



# save as latex table
table2 %>%
  xtable(
    type = "latex",
    caption = "The overall best individual learner performance across any task and filter method for RF, SVM, XGBoost, Lasso and Ridge, sorted ascending by RMSE (p.p.), including the respective standard error (SE) of the cross-validation run. For \\texttt{regr.featureless} the Task is not applicable and was therefore removed.",
    label = "tab:best-learner-perf",
    digits = 3
  ) %>%
  print(
    file = here("docs/00-manuscripts/mdpi/performance-best-per-learner.tex"),
    include.rownames = TRUE,
    latex.environments = c("center"),
    table.placement = "ht!",
    scalebox = 0.90,
    caption.placement = "top",
    timestamp = NULL,
    sanitize.text.function = function(x) {
      x
    }
  )

saveRDS(table2, here("docs/00-manuscripts/presentation/table-best-learner-per-task.rda"))

DT::datatable(table2)

(Table) T3 All learner/filter/task combinations ordered descending by performance.

Overall leaderboard across all settings, sorted descending by performance.

table3 <- df_perf %>%
  group_by(learner.id, task.id, filter) %>%
  slice(which.min(rmse_aggr)) %>%
  dplyr::rename(
    "Model" = learner_group,
    "Learner ID" = learner.id,
    "Task" = task.id,
    "Filter" = filter,
    "RMSE" = rmse_aggr,
    "SE" = se
  ) %>%
  ungroup() %>%
  mutate(Filter = replace(Filter, is.na(Filter), "No Filter")) %>%
  select(-iter, -`Learner ID`, -rsq) %>%
  relocate(RMSE, .after = Filter) %>%
  relocate(`SE`, .after = RMSE) %>%
  mutate(RMSE = round(RMSE, 3)) %>%
  arrange(RMSE)

# save as latex table
table3 %>%
  ungroup() %>%
  arrange(desc(RMSE)) %>%
  slice(1:10) %>%
  xtable(
    type = "latex",
    caption = "Worst ten results among all learner-task-filter combinations, sorted in decreasing order of RMSE (p.p.) and their respective standard error (SE).",
    label = "tab:perf-worst-10",
    digits = 3
  ) %>%
  print(
    file = here("docs/00-manuscripts/mdpi/performance-worst-10.tex"),
    include.rownames = TRUE,
    latex.environments = c("center"),
    table.placement = "ht!",
    caption.placement = "top",
    timestamp = NULL,
    sanitize.text.function = function(x) {
      x
    }
  )

saveRDS(table3, here("docs/00-manuscripts/presentation/table-perf-worst-10.rda"))

DT::datatable(table3)

(Plot) P1 Best learner/filter combinations for all tasks

results_aggr <- df_perf %>%
  filter(learner.id != "regr.featureless") %>%
  mutate(filter = replace(filter, is.na(filter), "NF")) %>%
  mutate(learner_group = recode_factor(learner_group, `XG` = "XGBoost")) %>%
  mutate(learner_group = recode_factor(learner_group, `Ridge-MBO` = "Ridge")) %>%
  mutate(learner_group = recode_factor(learner_group, `Lasso-MBO` = "Lasso")) %>%
  # reorder by min RMSE (reviewer request)
  group_by(learner_group, filter, task.id) %>%
  ### get the best performance per learner and task
  # this group_by() & slice() approach is better than summarise() because we
  # can keep additional columns, in contrast to summarise() which only keeps
  # the grouping columns and the summarised one
  slice(which.min(rmse_aggr)) %>% # collapses the CV iterations to one row
  ungroup() %>%
  group_by(task.id, learner_group) %>%
  slice(which.min(rmse_aggr)) %>%
  ungroup() %>%
  mutate(learner_group = fct_reorder(learner_group, rmse_aggr, min))

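A toy illustration (hypothetical data, not project objects) of why the group_by() + slice(which.min()) idiom above is preferred over summarise(): it keeps every column of the winning row.

toy <- tibble::tibble(g = c("a", "a", "b"), rmse = c(3, 1, 2), extra = c("x", "y", "z"))
toy %>%
  dplyr::group_by(g) %>%
  dplyr::slice(which.min(rmse)) # keeps 'extra'
toy %>%
  dplyr::group_by(g) %>%
  dplyr::summarise(rmse = min(rmse)) # drops 'extra'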
results_aggr %>%
  ggplot(aes(x = rmse_aggr, y = task.id)) +
  # geom_jitter(aes(color = learner_group), size = 2, width = 0, height = 0.3) +
  # geom_dotplot(aes(fill = learner_group), binaxis="y",
  #                        stackdir="up") +
  ggbeeswarm::geom_beeswarm(groupOnX = FALSE, aes(color = learner_group), size = 2.5) +
  # scale_color_nejm(breaks = sort(levels(results_aggr$learner_group))) +
  # scale_color_viridis_d(breaks = sort(levels(results_aggr$learner_group))) +
  scale_color_viridis_d(breaks = levels(results_aggr$learner_group)) +
  labs(x = "RMSE", y = "Task", color = "Learner") +
  guides(size = FALSE) +
  scale_x_continuous(limits = c(27, 40), breaks = seq(28, 40, 2)) +
  geom_label_repel(
    # subset data to remove out of bounds values
    data = results_aggr[results_aggr$rmse_aggr < 100, ],
    # from ggbeeswarm, avoid overlapping of points by labels
    position = position_quasirandom(),
    aes(label = paste0(filter, ",", round(rmse_aggr, 3))),
    size = 4,
    min.segment.length = 0.1,
    seed = 123,
    point.padding = 0.5
  ) +
  theme_pubr(base_size = 14) +
  theme(
    panel.grid.major.y = element_line(size = 0.1, linetype = "dashed"),
    axis.title.y = element_blank(),
    legend.text = element_text(size = 13),
    legend.title = element_text(size = 13),
    axis.text.y = element_text(angle = 45),
    plot.margin = unit(c(6, 6, 6, 0), "pt")
  )

(Plot) P2 Scatterplots of filter methods vs. no filter for each learner and task

Showing the final effect of applying feature selection to a learner for each task. All filters share the same color, whereas "no filter" results appear in a distinct color.

results_aggr1 <- df_perf %>%
  filter(learner_group != "Ridge-MBO") %>%
  filter(learner_group != "Lasso-MBO") %>%
  filter(learner.id != "regr.featureless") %>%
  # mutate(filter = replace(filter, "No Filter", "NF")) %>%
  mutate(learner_group = as.factor(learner_group)) %>%
  mutate(learner_group = recode(learner_group, `XGBoost` = "XG")) %>%
  mutate(filter = recode(filter, `No Filter` = "NF")) %>%
  mutate(learner_group = fct_rev(learner_group)) %>%
  group_by(learner_group, task.id, filter) %>%
  # the mean was already taken in chunk 'aggr-perf'; summarise() here only
  # collapses the duplicated rows per group
  summarise(perf = mean(rmse_aggr)) %>%
  ungroup() %>%
  # we need to reverse the order on purpose here so that ggplot reverses it
  # again later
  mutate(learner_group = fct_relevel(learner_group, "XG", "SVM", "RF"))

results_aggr1 %>%
  ggplot(aes(x = perf, y = learner_group)) +
  ggbeeswarm::geom_beeswarm(
    data = results_aggr1[results_aggr1$filter != "NF", ], size = 2.2, shape = 3,
    groupOnX = FALSE, aes(color = "Filter")
  ) +
  geom_point(
    data = results_aggr1[results_aggr1$filter == "NF", ],
    size = 2.2, shape = 19, aes(color = "No Filter")
  ) +
  facet_wrap(~task.id) +
  scale_color_nejm(guide = guide_legend(override.aes = list(shape = c(3, 19)))) +
  labs(x = "RMSE", y = "Task", colour = NULL) +
  guides(size = FALSE) +
  scale_x_continuous(limits = c(27, 51)) +
  theme_pubr(base_size = 14) +
  theme(
    panel.grid.major.y = element_line(size = 0.1, linetype = "dashed"),
    axis.title.y = element_blank(),
    legend.text = element_text(size = 14),
    legend.title = element_text(size = 14)
  )

(Plot) P3 Scatterplots of filter methods vs. Borda for each learner and task

Showing the final effect of applying feature selection to a learner for each task. All filters are summarized into a single color, whereas the "Borda" filter appears in its own color.

results_aggr2 <- df_perf %>%
  na.omit() %>%
  filter(learner_group != "Ridge-MBO") %>%
  filter(learner_group != "Lasso-MBO") %>%
  filter(learner.id != "regr.featureless") %>%
  mutate(learner_group = recode_factor(learner_group, `XGBoost` = "XG")) %>%
  group_by(learner_group, task.id, filter) %>%
  # the mean was already taken in chunk 'aggr-perf'; summarise() here only
  # collapses the duplicated rows per group
  summarise(perf = mean(rmse_aggr)) %>%
  ungroup() %>%
  # we need to reverse the order on purpose here so that ggplot reverses it
  # again later
  mutate(learner_group = fct_relevel(learner_group, "XG", "SVM", "RF"))

results_aggr2 %>%
  ggplot(aes(x = perf, y = learner_group)) +
  ggbeeswarm::geom_beeswarm(
    data = results_aggr2[results_aggr2$filter != "Borda", ],
    shape = 3, size = 2.2, aes(color = "Filter"),
    groupOnX = FALSE
  ) +
  geom_point(
    data = results_aggr2[results_aggr2$filter == "Borda", ],
    shape = 19, size = 2.2, aes(color = "Borda Filter")
  ) +
  facet_wrap(~task.id) +
  scale_color_manual(
    guide = guide_legend(override.aes = list(shape = c(19, 3))),
    values = c(
      "Filter" = "#BC3C29FF",
      "Borda Filter" = "#0072B5FF"
    )
  ) +
  labs(x = "RMSE", y = "Task", colour = NULL) +
  guides(size = FALSE) +
  scale_x_continuous(limits = c(27, 51)) +
  theme_pubr(base_size = 14) +
  theme(
    panel.grid.major.y = element_line(size = 0.1, linetype = "dashed"),
    axis.title.y = element_blank(),
    legend.text = element_text(size = 14),
    legend.title = element_text(size = 14)
  )

(Table) T4 Number of features selected during tuning

The selected model/task combinations correspond to the best performance of the respective algorithm on the HR-NRI-VI task in the overall benchmark.

Fold IDs differ across learners, i.e. a given plot does not always resolve to "fold 1" for every learner. See bmr_inspect_tune[["results"]][["hr_nri_vi"]][["RF MBO Relief"]][["pred"]][["instance"]][["test.inds"]].

Thus, the fold IDs need to be manually mapped to plot names for each learner.

For example, fold 1 resolves to Luiando for RF; the mapping was inspected separately for each of RF, SVM, and XGBoost (see the sketch after the next chunk).

bmr_inspect_tune <- mergeBenchmarkResults(benchmark_tune_results_hr_nri_vi)
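A minimal way to inspect the fold-to-plot mapping is to pull the test-set row indices from the stored resampling instance and match them against the known row ranges of the four plots (the matching step is omitted here):

test_inds <- bmr_inspect_tune[["results"]][["hr_nri_vi"]][["RF MBO Relief"]][["pred"]][["instance"]][["test.inds"]]
str(test_inds) # list of four integer vectors, one per fold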

df_tbl <- getBMRTuneResults(bmr_inspect_tune, as.df = TRUE) %>%
  # replace iter IDs by individual fold names
  dplyr::mutate(iter = as.character(iter)) %>%
  dplyr::mutate(iter = case_when(
    learner.id == "XGBOOST MBO Borda" & iter == 1 ~ "Luiando",
    learner.id == "XGBOOST MBO Borda" & iter == 2 ~ "Laukiz1",
    learner.id == "XGBOOST MBO Borda" & iter == 3 ~ "Oiartzun",
    learner.id == "XGBOOST MBO Borda" & iter == 4 ~ "Laukiz2",

    learner.id == "SVM MBO Car" & iter == 1 ~ "Laukiz1",
    learner.id == "SVM MBO Car" & iter == 2 ~ "Oiartzun",
    learner.id == "SVM MBO Car" & iter == 3 ~ "Luiando",
    learner.id == "SVM MBO Car" & iter == 4 ~ "Laukiz2",

    learner.id == "RF MBO Car" & iter == 1 ~ "Luiando",
    learner.id == "RF MBO Car" & iter == 2 ~ "Laukiz2",
    learner.id == "RF MBO Car" & iter == 3 ~ "Laukiz1",
    learner.id == "RF MBO Car" & iter == 4 ~ "Oiartzun",

    TRUE ~ as.character(iter)
  )) %>%
  dplyr::rename(Learner = learner.id) %>%
  dplyr::rename(Plot = iter) %>%
  dplyr::rename(RMSE = rmse.test.mean) %>%
  dplyr::mutate(fw.perc = fw.perc * 100) %>%
  dplyr::rename("Features (\\%)" = fw.perc) %>%
  dplyr::mutate(`Test Plot` = as.character(Plot)) %>%
  dplyr::mutate(Learner = forcats::fct_recode(Learner,
    "RF \\\\ Car" = "RF MBO Car", "XGB \\\\ Borda" = "XGBOOST MBO Borda",
    "SVM \\\\ Car" = "SVM MBO Car"
  )) %>%
  dplyr::mutate(Learner = as.character(Learner)) %>%
  dplyr::mutate(`Test Plot` = as.character(`Test Plot`)) %>%
  dplyr::mutate("Features (\\#)" = case_when(
    # the denominators are the total feature counts of the respective training
    # sets (with the named plot held out as the test set)
    `Test Plot` == "Laukiz1" ~ paste0(as.character(ceiling((`Features (\\%)` * 1249) / 100)), "/1249"),
    `Test Plot` == "Laukiz2" ~ paste0(as.character(ceiling((`Features (\\%)` * 1357) / 100)), "/1357"),
    `Test Plot` == "Luiando" ~ paste0(as.character(ceiling((`Features (\\%)` * 1507) / 100)), "/1507"),
    `Test Plot` == "Oiartzun" ~ paste0(as.character(ceiling((`Features (\\%)` * 1311) / 100)), "/1311")
  )) %>%
  dplyr::group_by(Learner) %>%
  dplyr::arrange(Learner, `Test Plot`) %>%
  dplyr::select(Learner, `Test Plot`, "Features (\\%)", "Features (\\#)")
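A worked example of the conversion above (hypothetical value): a tuned fw.perc of 0.025 becomes 2.5 after the rescaling, and on the Laukiz1 training set this resolves to

ceiling((2.5 * 1249) / 100) # -> 32, printed as "32/1249"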

# Collapse repeated learner names: keep only the first occurrence per run;
# rle() yields the run lengths needed for \multirow below.
rle.lengths <- rle(df_tbl[[1]])$lengths
first <- !duplicated(df_tbl[[1]])
df_tbl[[1]][!first] <- ""

# define appearance of \multirow
df_tbl[[1]][first] <-
  paste0("\\midrule\\multirow{", rle.lengths, "}{*}{\\specialcell{", df_tbl[[1]][first], "}}")
# remove redundant midrule from first entry
df_tbl[[1]][first][1] <- gsub("\\\\midrule", "", df_tbl[[1]][first][1])
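For illustration, a Learner cell now holds a LaTeX fragment along these lines (the exact label depends on the recoding above):

df_tbl[[1]][first][2]
# e.g. "\\midrule\\multirow{4}{*}{\\specialcell{SVM \\\\ Car}}"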

table4 <- df_tbl %>%
  xtable::xtable(
    type = "latex",
    caption = "Proportions of features selected during tuning for the best performing learner-filter settings (SVM Relief, RF Relief, XGBoost CMIM) across folds for task HR-NRI-VI, sorted by plot name. 'Features (\\texttt{\\#})' denotes the absolute number of features selected and 'Features (\\texttt{\\%})' refers to the percentage relative to the overall features available in the training sets for each plot (Laukiz1 = 1249, Laukiz2 = 1357, Luiando = 1507, Oiartzun = 1311). Results were estimated in a separate model tuning step, not within the main cross-validation comparison.",
    label = "tab:tune-perc-sel-features",
    digits = c(0, 0, 0, 5, 0)
  )

# save to file
table4 %>%
  print(
    file = here("docs/00-manuscripts/mdpi/tune-perc-sel-features.tex"),
    include.rownames = FALSE,
    latex.environments = c("center"),
    table.placement = "ht!",
    caption.placement = "top",
    timestamp = NULL,
    booktabs = TRUE,
    # important to treat content of multirow as latex content
    sanitize.text.function = force
  )

saveRDS(table4, here("docs/00-manuscripts/presentation/tune-perc-sel-feature.rda"))

DT::datatable(table4)

Aggregated mean and standard deviation:

getBMRTuneResults(bmr_inspect_tune, as.df = TRUE) %>%
  dplyr::rename(Learner = learner.id) %>%
  dplyr::rename(Plot = iter) %>%
  dplyr::rename(RMSE = rmse.test.mean) %>%
  dplyr::rename("Features (%)" = fw.perc) %>%
  dplyr::mutate(Plot = as.character(Plot)) %>%
  dplyr::mutate(Plot = forcats::fct_recode(Plot,
    Oiartzun = "4", Luiando = "3",
    Laukiz1 = "1", Laukiz2 = "2"
  )) %>%
  dplyr::mutate(Learner = forcats::fct_recode(Learner,
    "RF \\\\ Relief" = "RF MBO Relief", "XGB \\\\ CMIM" = "XGBOOST MBO CMIM",
    "SVM \\\\ Relief" = "SVM MBO Relief"
  )) %>%
  dplyr::mutate(Learner = as.character(Learner)) %>%
  dplyr::mutate(Plot = as.character(Plot)) %>%
  dplyr::group_by(Learner) %>%
  dplyr::summarise(
    "Mean (Features (%))" = mean(`Features (%)`),
    "SD (Features (%))" = sd(`Features (%)`)
  ) %>%
  DT::datatable()

