library(knitr)
library(BBmisc)
library(mlr)        # configureMlr(), task/learner/performance helpers
library(ggplot2)    # ggtitle() for the summary and prediction plots
opts_chunk$set(size = "footnotesize", echo = FALSE)
options(digits = 3L)
configureMlr(show.info = FALSE)
reqReactive = function(reac) {
  tryCatch(reac, error = function(err) NULL)
}
```
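`reqReactive()` above wraps a reactive value so that, while the value is not yet available, the report gets `NULL` instead of an aborted knit. A minimal sketch of that behaviour (the `stop()` call only stands in for a reactive that is not ready):

```r
# Illustration only: the error is swallowed and NULL is returned, so later
# chunks can test availability with is.null().
missing.value = reqReactive(stop("reactive not ready"))
is.null(missing.value)  # TRUE
```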

```r
# Pull the current data set out of the app's data object
df = data$data
sum.vis = reqReactive(summary.vis.collection)
eval.sum.vis = all(params$include$data, !is.null(sum.vis$var.plots))
params$titles$data = ifelse(params$include$data, params$titles$data, "")
# data.default.text = stri_paste()
```

`r params$titles$data`

`r params$passage$data`

```r
kable(summarizeColumns(df))
plot.titles = names(sum.vis$var.plots)
for (i in seq_along(plot.titles)) {
  q = sum.vis$var.plots[[plot.titles[i]]]
  q = q + ggtitle(plot.titles[i])
  print(q)
}
```
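For reference, `summarizeColumns()` from mlr returns one row per variable (type, number of missing values, basic dispersion statistics), which `kable()` renders as a markdown table. A standalone sketch on a built-in data set rather than the app's `df`:

```r
library(mlr)
library(knitr)
# One summary row per column of iris, printed as a markdown table
kable(summarizeColumns(iris))
```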

\newpage

```r
task = reqReactive(task.out())
params$include$task = all(params$include$task, !is.null(task))
params$titles$task = ifelse(params$include$task, params$titles$task, "")
task.type = getTaskType(task)
out.task = ""

`r params$titles$task`

`r params$passage$task`

```r
# FIXME: make this task-type dependent
tar.name = knit_expand(text="**Target name:** {{getTaskTargetNames(task)}} \n")
n.obs = knit_expand(text="**Number of observations:** {{getTaskSize(task)}} \n")
n.feats = knit_expand(text="**Number of features:** {{getTaskNFeats(task)}} \n")
out.task = c(tar.name, n.obs, n.feats)
if (task.type == "classif") {
  tar.levs = paste("**Target levels:** ", collapse(getTaskClassLevels(task), sep = ", "), "\n")
  tar.levs = knit_expand(text = tar.levs)
  out.task = c(out.task, tar.levs)
}
```

`r paste(knit(text = out.task), collapse = '\n')`
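The task facts above are built with the `knit_expand()` / `knit(text = )` pattern: `knit_expand()` first substitutes the `{{...}}` expressions, and the expanded markdown is then knitted by the inline call. A small standalone illustration (using `iris` instead of the report's task):

```r
library(knitr)
# {{...}} is evaluated at expansion time, before the text is knitted
knit_expand(text = "**Number of observations:** {{nrow(iris)}} \n")
#> [1] "**Number of observations:** 150 \n"
```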

\newpage

```r
learners = reqReactive(learners())
learners.par.sets = reqReactive(learners.par.sets())
eval.learners = all(!is.null(learners), params$include$learners)
params$include$learners = all(params$include$learners, eval.learners)
params$titles$learners = ifelse(params$include$learners,
  params$titles$learners, "")
out.lrns = ""

`r params$titles$learners`

`r params$passage$learners`

```r
# FIXME: Put in datatables for HTML files when we implement interactive code chunks
out.lrns = NULL
lrn.names = names(learners)
for (i in seq_along(learners.par.sets)) {
  par.set = ParamHelpers:::getParSetPrintData(learners.par.sets[[i]])
  lrn.name = knit_expand(text="### {{lrn.names[i]}} \n")
  if (input$report.format == "PDF") {
    tab = knit_expand(text="{{kable(par.set)}} \n")
    # cat("\n")
  } else {
    # DT::datatable(par.set)
    tab = kable(par.set)
    # cat("\n")
  }
  out.lrns = c(out.lrns, lrn.name, tab)
}
```

`r paste(knit(text = out.lrns), collapse = '\n')`
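The `learners.par.sets()` reactive is expected to hold one ParamHelpers parameter set per selected mlr learner. A hedged sketch of how such a set can be obtained outside the app (the learner name is only an example):

```r
library(mlr)
lrn = makeLearner("classif.rpart")
# ParamSet describing the learner's hyperparameters (type, default, range)
getParamSet(lrn)
```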

\newpage

```r
model = reqReactive(model())
test.df = data$data.test
train.learner = reqReactive(train.learner())
measures.perf = reqReactive(measures.perf())
# print(measures.perf)
if (is.null(measures.perf) || length(measures.perf) == 0L)
  measures.perf = getDefaultMeasure(task)
if (inherits(model, "WrappedModel")) {
  preds.on.task = predict(model, task)
  perf.on.task = performance(preds.on.task, measures.perf, model = model, task = task)
  if (is.data.frame(test.df)) {
    preds.on.test = predict(model, newdata = test.df)
    perf.on.test = performance(preds.on.test, measures.perf,
      model = model, task = task)
  }
}
```



```r
eval.model = all(!is.null(model), !is.null(measures.perf))
params$include$modelling = all(params$include$modelling, eval.model)
pred.plots = reqReactive(prediction.plot.collection)
# pred.plots = pred.plots$pred.plots
eval.pred.plots = all(params$include$modelling, !is.null(pred.plots))
params$titles$modelling = ifelse(params$include$modelling,
  params$titles$modelling, "")
out.mod = ""

`r params$titles$modelling`

`r params$passage$modelling`

```r
if (inherits(model, "WrappedModel")) {
  # FIXME: make this task-type dependent
  lrn.name = knit_expand(text="**Trained learner:** {{train.learner$name}} \n")
  mod.text.perf = knit_expand(text = "### Performance on task: \n")
  mod.task.perf = kable(as.data.frame(perf.on.task))
  out.mod = c(lrn.name, mod.text.perf, mod.task.perf)

  if (is.data.frame(test.df)) {
    mod.text.test = knit_expand(text = "### Performance on test set: \n")
    mod.test.perf = kable(as.data.frame(perf.on.test))
    out.mod = c(out.mod, "\n", mod.text.test, mod.test.perf)
  }  
}

if (is.null(out.mod) || length(out.mod) == 0L)
  out.mod = ""
```

`r knit(text = out.mod)`
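The performance tables above come from mlr's `predict()` / `performance()` workflow. For orientation, the same steps on a self-contained toy example (task and learner chosen only for illustration, not taken from the app's reactives):

```r
library(mlr)
toy.task = makeClassifTask(data = iris, target = "Species")
toy.model = train(makeLearner("classif.rpart"), toy.task)
toy.pred = predict(toy.model, task = toy.task)
# The default measure for a classification task is the mean misclassification error
performance(toy.pred, measures = getDefaultMeasure(toy.task))
```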

```r
# summarizeColumns(preds.on.test)
```

\newpage

```r
plot.titles = names(pred.plots$pred.plots)
for (i in seq_along(plot.titles)) {
  q = pred.plots$pred.plots[[plot.titles[i]]]
  # q = q + ggtitle(plot.titles)
  print(q)
}
```

\newpage

```r
tune.res = reqReactive(tuning())
# measures.perf = reqReactive(measures.perf())
eval.tuning = all(!is.null(tune.res), params$include$tuning)
params$include$tuning = all(params$include$tuning, eval.tuning)
params$titles$tuning = ifelse(params$include$tuning,
  params$titles$tuning, "")

`r params$titles$tuning`

`r params$passage$tuning`

```r
tune.res$x  # best hyperparameter setting found by tuning
tune.res$y  # resampled performance at that setting
```
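The `tuning()` reactive is expected to return an mlr `TuneResult`, where `$x` holds the best hyperparameter setting and `$y` the corresponding resampled performance. A self-contained sketch of such an object (parameter grid and learner chosen only for illustration):

```r
library(mlr)
ps = makeParamSet(makeDiscreteParam("cp", values = c(0.01, 0.05)))
res = tuneParams("classif.rpart", makeClassifTask(data = iris, target = "Species"),
  resampling = cv2, par.set = ps, control = makeTuneControlGrid())
res$x  # best setting, e.g. a list with the chosen cp value
res$y  # resampled performance at that setting
```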

\newpage

```r
bmr = reqReactive(bmr())
# measures.perf = reqReactive(measures.perf())
eval.benchmark = all(!is.null(bmr), params$include$benchmark)
params$include$benchmark = all(params$include$benchmark, eval.benchmark)
bmr.plots = reqReactive(bmr.plots.collection)
eval.bmr.plots = all(params$include$benchmark, !is.null(bmr.plots))
params$titles$benchmark = ifelse(params$include$benchmark,
  params$titles$benchmark, "")

`r params$titles$benchmark`

`r params$passage$benchmark`

```r
if (inherits(bmr, "BenchmarkResult")) {
  out.bmr = NULL
  bmr.tab = getBMRAggrPerformances(bmr, as.df = TRUE)
  # bmr.tab = unlist(bmr.tab, recursive = FALSE)
  # for (i in seq_along(bmr.tab)) {
  #   tab = as.data.frame(bmr.tab[[i]])
  #       colnames(tab) = names(learners)[[i]]
  #   tab = knit_expand(text="{{kable(tab)}} \n")
  #   out.bmr = c(out.bmr, tab)
  # }
  out.bmr = knit_expand(text="{{kable(bmr.tab)}} \n")
} else {
  out.bmr = ""
}
```

`r paste(knit(text = out.bmr), collapse = '\n')`
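`getBMRAggrPerformances(bmr, as.df = TRUE)` flattens the aggregated benchmark results into one data frame, which is what `kable()` renders above. A toy benchmark showing the shape of that table (learners and resampling chosen only for illustration):

```r
library(mlr)
toy.task = makeClassifTask(data = iris, target = "Species")
toy.bmr = benchmark(
  learners = list(makeLearner("classif.rpart"), makeLearner("classif.lda")),
  tasks = toy.task, resamplings = cv2)
# One row per task/learner combination with the aggregated measure values
getBMRAggrPerformances(toy.bmr, as.df = TRUE)
```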

```r
for (i in seq_along(bmr.plots$plot.titles)) {
  q = bmr.plots$bmr.plots[[bmr.plots$plot.titles[i]]]
  print(q)
}
```

