inst/doc/SummarizedBenchmark-Introduction.R

## ----echo=FALSE, include=FALSE------------------------------------------------
knitr::opts_chunk$set(tidy = FALSE, cache = TRUE, dev = "png",
                      message = FALSE, error = FALSE, warning = TRUE)

## -----------------------------------------------------------------------------
library("SummarizedBenchmark")
library("magrittr")

## -----------------------------------------------------------------------------
data(tdat)

## -----------------------------------------------------------------------------
head(tdat)

## -----------------------------------------------------------------------------
## adjust the p-values with the Bonferroni and Benjamini-Hochberg corrections
adj_bonf <- p.adjust(p = tdat$pval, method = "bonferroni")

adj_bh <- p.adjust(p = tdat$pval, method = "BH")

## compute q-values with the qvalue package
qv <- qvalue::qvalue(p = tdat$pval)
adj_qv <- qv$qvalues

## -----------------------------------------------------------------------------
adj <- cbind.data.frame(adj_bonf, adj_bh, adj_qv)
head(adj)

## -----------------------------------------------------------------------------
## initialize a BenchDesign with the benchmarking data
b <- BenchDesign(data = tdat)

## -----------------------------------------------------------------------------
## each method is added with a label, the function to run, and quoted
## parameters (rlang::quos) that are evaluated against the data
b <- addMethod(bd = b, label = "bonf", func = p.adjust,
               params = rlang::quos(p = pval, method = "bonferroni"))

## -----------------------------------------------------------------------------
b <- b %>%
    addMethod(label = "BH",
              func = p.adjust,
              params = rlang::quos(p = pval, method = "BH")) %>%
    addMethod(label = "qv",
              func = qvalue::qvalue,
              params = rlang::quos(p = pval),
              ## qvalue() returns a list, so `post` is used to extract
              ## the vector of q-values from the result
              post = function(x) { x$qvalues })

## -----------------------------------------------------------------------------
b

## -----------------------------------------------------------------------------
printMethods(b)

## -----------------------------------------------------------------------------
## run all methods and collect the results in a SummarizedBenchmark object;
## truthCols names the column of `tdat` holding the ground truth
sb <- buildBench(b, truthCols = "H")

## -----------------------------------------------------------------------------
head(assay(sb))
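
## -----------------------------------------------------------------------------
## a sketch, not part of the original vignette: assays of the
## SummarizedBenchmark object are named; with truthCols = "H", the assay "H"
## holds the adjusted p-values returned by each method
assayNames(sb)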

## -----------------------------------------------------------------------------
colData(sb)

## -----------------------------------------------------------------------------
rowData(sb)

## ----addPerformanceMetric-----------------------------------------------------
sb <- addPerformanceMetric(
  object = sb,
  assay = "H",
  evalMetric = "TPR",
  ## true positive rate: fraction of true positives (truth == 1)
  ## called significant at the cutoff alpha
  evalFunction = function(query, truth, alpha = 0.1) {
    goodHits <- sum((query < alpha) & truth == 1)
    goodHits / sum(truth == 1)
  }
)

performanceMetrics(sb)[["H"]]
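
## -----------------------------------------------------------------------------
## a sketch, not part of the original vignette: a second metric can be
## registered the same way, e.g. the observed false discovery proportion
## at the cutoff alpha
sb <- addPerformanceMetric(
  object = sb,
  assay = "H",
  evalMetric = "FDR",
  evalFunction = function(query, truth, alpha = 0.1) {
    nHits <- sum(query < alpha)
    ## fraction of calls that are true nulls (truth == 0);
    ## defined as 0 when nothing is called at alpha
    if (nHits == 0) return(0)
    sum((query < alpha) & truth == 0) / nHits
  }
)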

## -----------------------------------------------------------------------------
## evaluate each metric at several alpha cutoffs; the result is a wide
## DataFrame with one row per method
resWide <- estimatePerformanceMetrics(sb, alpha = c(0.05, 0.1, 0.2))
resWide

## ----elWide-------------------------------------------------------------------
elementMetadata(resWide)

## -----------------------------------------------------------------------------
## alternatively, store the estimated metrics in the colData of the object
sb <- estimatePerformanceMetrics(sb,
                                 alpha = c(0.05, 0.1, 0.2),
                                 addColData = TRUE)
colData(sb)
elementMetadata(colData(sb))

## -----------------------------------------------------------------------------
## or return the metrics in tidy (long) format
estimatePerformanceMetrics(sb,
                           alpha = c(0.05, 0.1, 0.2),
                           tidy = TRUE)

## -----------------------------------------------------------------------------
head(tidyUpMetrics(sb))

## -----------------------------------------------------------------------------
tidyUpMetrics(sb) %>%
  dplyr::filter(label == "bonf", alpha == 0.1, performanceMetric == "TPR") %>%
  dplyr::select(value)
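
## -----------------------------------------------------------------------------
## a sketch, not part of the original vignette: the tidy long format is
## convenient for plotting, e.g. comparing TPR across the alpha cutoffs
## with ggplot2 (assumed available)
library("ggplot2")
tidyUpMetrics(sb) %>%
  dplyr::filter(performanceMetric == "TPR") %>%
  ggplot(aes(x = alpha, y = value, color = label)) +
  geom_point() +
  geom_line() +
  labs(x = "alpha cutoff", y = "TPR")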
