Benchmark                                              R Documentation

Benchmark R6 class

Description:

     Benchmark R6 class.
Public fields:

     .data              data.frame with the benchmark data
     is_complete        logical; if TRUE, only proteins for which all
                        contrasts are determinable are examined
     contrast           name of the contrast column
     toscale            which columns to scale
     avgInt             column with the average intensity
     fcestimate         column with the fold-change estimate
     benchmark          list of scores to benchmark
     model_description  description of the model
     model_name         name of the model
     hierarchy          hierarchy columns, e.g. protein_Id
     smc                summary of missing contrasts
     summarizeNA        statistic to use for missingness summarization
                        (e.g. statistic, or p-value)
     confusion          confusion data (see .get_confusion())
     species            species column
     FDRvsFDP           scores for which to generate the FDR vs FDP analysis
Methods:

  Method new():

     Create a Benchmark instance.

     Usage:

        Benchmark$new(
          data,
          toscale = c("p.value"),
          fcestimate = "diff",
          avgInt = "avgInt",
          benchmark = list(
            list(score = "diff", desc = TRUE),
            list(score = "statistic", desc = TRUE),
            list(score = "scaled.p.value", desc = TRUE)
          ),
          FDRvsFDP = list(list(score = "FDR", desc = FALSE)),
          model_description = "protein level measurments, linear model",
          model_name = "medpolish_lm",
          contrast = "contrast",
          species = "species",
          hierarchy = c("protein_Id"),
          summarizeNA = "statistic"
        )
     Arguments:

        data               data.frame
        toscale            columns to scale
        fcestimate         column with fold-change estimates
        avgInt             average protein/peptide/metabolite intensity
        benchmark          scores (columns) to benchmark
        FDRvsFDP           scores to create the FDR vs FDP analysis for
        model_description  description of the model
        model_name         name of the model
        contrast           contrast column
        species            species column (to be renamed)
        hierarchy          hierarchy columns, e.g. protein_Id
        summarizeNA        column examined to determine the proportion of
                           missing values (default "statistic")
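     A minimal construction sketch (the data.frame mydata is hypothetical
     and assumed to contain the columns named in the arguments above,
     including avgInt and the benchmark score columns):

        bm <- Benchmark$new(
          mydata,
          fcestimate = "diff",       # column holding the fold-change estimate
          contrast = "contrast",     # column identifying the contrast
          species = "species",
          hierarchy = c("protein_Id")
        )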
  Method data():

     Get the data.

     Usage:

        Benchmark$data()

     Returns: data.frame
  Method missing_contrasts():

     Summarize missing contrasts.

     Usage:

        Benchmark$missing_contrasts()

     Returns: data.frame
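     For example (a sketch, assuming a Benchmark instance bm as above):

        mc <- bm$missing_contrasts()
        mc$summary   # summary of missing contrasts (see also the smc field)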
  Method complete():

     Set or get the completeness flag. If TRUE, only proteins for which
     all contrasts are determinable are examined.

     Usage:

        Benchmark$complete(value)

     Arguments:

        value  TRUE if the data should be complete (no missing contrasts)
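     For example (assuming a Benchmark instance bm as above):

        bm$complete(TRUE)    # restrict to proteins with all contrasts determinable
        bm$complete(FALSE)   # examine all proteins, including incomplete ones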
  Method .get_confusion():

     Get confusion data.

     Usage:

        Benchmark$.get_confusion(arrange)

     Arguments:

        arrange  todo
  Method get_confusion_benchmark():

     Get FDR summaries.

     Usage:

        Benchmark$get_confusion_benchmark()
  Method n_confusion_benchmark():

     Number of elements used to determine the ROC curve.

     Usage:

        Benchmark$n_confusion_benchmark()
  Method plot_ROC():

     Plot FDR summaries (ROC curves).

     Usage:

        Benchmark$plot_ROC(xlim = 0.5)

     Arguments:

        xlim  upper limit of the x axis

     Returns: ggplot
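     For example, zooming in on the low false-positive region (assuming
     a Benchmark instance bm as above):

        p <- bm$plot_ROC(xlim = 0.1)
        p   # a ggplot object, so it can be customized further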
  Method pAUC_summaries():

     Partial AUC (pAUC) summaries.

     Usage:

        Benchmark$pAUC_summaries()

  Method pAUC():

     Partial AUC summaries as a table.

     Usage:

        Benchmark$pAUC()
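     The pAUC summaries can also be inspected as a table; the Examples
     below access it via the ftable field returned by pAUC_summaries()
     (assuming a Benchmark instance bm as above):

        ps <- bm$pAUC_summaries()
        ps$ftable$content   # one row per benchmarked score and contrast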
  Method get_confusion_FDRvsFDP():

     Get the FDR vs FDP data.

     Usage:

        Benchmark$get_confusion_FDRvsFDP()

  Method n_confusion_FDRvsFDP():

     Number of elements used to determine the FDR vs FDP curve.

     Usage:

        Benchmark$n_confusion_FDRvsFDP()
  Method plot_FDRvsFDP():

     Plot the FDR vs FDP data.

     Usage:

        Benchmark$plot_FDRvsFDP()

     Returns: ggplot
  Method plot_score_distribution():

     Plot the distributions of the scores.

     Usage:

        Benchmark$plot_score_distribution(score)

     Arguments:

        score  list specifying which score distributions to plot

     Returns: ggplot
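     The score argument is a list of lists, each giving a score column
     and optional x-axis limits (as in the Examples below):

        bm$plot_score_distribution(list(
          list(score = "estimate", xlim = c(-1, 2)),
          list(score = "statistic", xlim = c(-3, 10))
        ))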
  Method plot_scatter():

     Plot intensity versus scores.

     Usage:

        Benchmark$plot_scatter(score)

     Arguments:

        score  list specifying which scores to plot against intensity

     Returns: ggplot
  Method plot_precision_recall():

     Plot precision versus recall.

     Usage:

        Benchmark$plot_precision_recall(precision_lim = 0.7, recall_lim = 1)

     Arguments:

        precision_lim  lower limit of the shown precision
        recall_lim     upper limit of the shown recall

     Returns: ggplot
  Method clone():

     The objects of this class are cloneable with this method.

     Usage:

        Benchmark$clone(deep = FALSE)

     Arguments:

        deep  Whether to make a deep clone.
See Also:

     Other benchmarking: INTERNAL_FUNCTIONS_BY_FAMILY,
     ionstar_bench_preprocess(), make_benchmark(),
     ms_bench_add_scores(), ms_bench_auc()
Examples:

     # load the example benchmark data and drop rows without a test statistic
     dd <- dplyr::filter(prolfqua_data('data_benchmarkExample'), !is.na(statistic))
     # add the average intensity over the two conditions
     dd <- dd |> dplyr::mutate(avgInt = (c1 + c2)/2)
     ttd <- ionstar_bench_preprocess(dd)

     # benchmark median-polish protein estimates combined with a linear model
     medpol_benchmark <- make_benchmark(ttd$data,
       benchmark = list(
         list(score = "estimate", desc = TRUE),
         list(score = "statistic", desc = TRUE),
         list(score = "scaled.p.value", desc = TRUE)
       ),
       fcestimate = "estimate",
       model_description = "med. polish and lm. density",
       model_name = "prot_med_lm"
     )
     medpol_benchmark$plot_score_distribution(list(
       list(score = "estimate", xlim = c(-1, 2)),
       list(score = "statistic", xlim = c(-3, 10))
     ))
     medpol_benchmark$get_confusion_benchmark()
     # benchmark a moderated linear model at the protein level
     benchmark <- make_benchmark(
       ttd$data,
       toscale = c("moderated.p.value", "moderated.p.value.adjusted"),
       fcestimate = "estimate",
       benchmark = list(
         list(score = "estimate", desc = TRUE),
         list(score = "statistic", desc = TRUE),
         list(score = "scaled.moderated.p.value", desc = TRUE),
         list(score = "scaled.moderated.p.value.adjusted", desc = TRUE)
       ),
       FDRvsFDP = list(
         list(score = "moderated.p.value", desc = FALSE),
         list(score = "moderated.p.value.adjusted", desc = FALSE)
       ),
       model_description = "protein level measurements, lm model",
       model_name = "prot_lm"
     )
     bb <- benchmark$pAUC_summaries()
     benchmark$complete(FALSE)   # include proteins with missing contrasts
     benchmark$smc$summary       # summary of missing contrasts

     benchmark$plot_score_distribution(list(
       list(score = "estimate", xlim = c(-1, 2)),
       list(score = "statistic", xlim = c(-3, 10))
     ))
     benchmark$plot_score_distribution()

     bb <- benchmark$get_confusion_FDRvsFDP()
     xb <- dplyr::filter(bb, contrast == "dilution_(4.5/3)_1.5")
     bb <- benchmark$get_confusion_benchmark()

     benchmark$plot_ROC(xlim = 0.1)
     benchmark$plot_precision_recall()
     benchmark$plot_FDRvsFDP()
     benchmark$plot_scatter(list(
       list(score = "estimate", ylim = c(-1, 2)),
       list(score = "statistic", ylim = c(-3, 10))
     ))

     benchmark$complete(FALSE)
     benchmark$missing_contrasts()
     # 4 benchmarked scores times (4 + 1) contrast rows
     stopifnot(nrow(benchmark$pAUC_summaries()$ftable$content) == 4 * (4 + 1))
     benchmark$complete(TRUE)
     stopifnot(nrow(benchmark$pAUC_summaries()$ftable$content) == 4 * (4 + 1))
     missum <- benchmark$missing_contrasts()$summary
     stopifnot(nrow(missum) == 4)
     stopifnot(ncol(missum) == 2)
     # returns the number of statistics
     stopifnot(nrow(benchmark$n_confusion_benchmark()) == 4 * (4 + 1))
     stopifnot(nrow(benchmark$n_confusion_FDRvsFDP()) == 2 * (4 + 1))
     benchmark$pAUC()