# tests/testthat/test-prob-average_precision.R

test_that("Calculations are correct - two class", {
  # Build the PR curve, then integrate it as a step function:
  # average precision is sum over thresholds of delta-recall * precision.
  pr <- pr_curve_vec(two_class_example$truth, two_class_example$Class1)

  rec <- pr[["recall"]]
  prec <- pr[["precision"]]

  expected <- sum(diff(rec) * prec[-1])

  expect_equal(
    average_precision_vec(two_class_example$truth, two_class_example$Class1),
    expected
  )
})

test_that("Calculations are correct - multi class", {
  # Hard-coded reference value for the default (macro) multiclass estimate
  res <- average_precision(hpc_cv, obs, VF:L)

  expect_equal(res[[".estimate"]], 0.623566079)
})

test_that("Calculations handles NAs", {
  # Inject missing probabilities into a test-local copy of `hpc_cv`
  hpc_cv$VF[seq_len(10)] <- NA

  # Default `na_rm = TRUE`: NA rows are dropped before computing
  expect_equal(
    average_precision(hpc_cv, obs, VF:L)[[".estimate"]],
    0.623365730
  )

  # `na_rm = FALSE`: missingness propagates to the estimate
  expect_equal(
    average_precision(hpc_cv, obs, VF:L, na_rm = FALSE)[[".estimate"]],
    NA_real_
  )
})

test_that("Case weights calculations are correct", {
  # Reference values precomputed with scikit-learn; the original loaded
  # the same "py-average-precision" data twice — one read is enough.
  py <- read_pydata("py-average-precision")

  two_class_example$weight <- read_weights_two_class_example()
  hpc_cv$weight <- read_weights_hpc_cv()

  # Binary, case-weighted estimate
  expect_equal(
    average_precision(two_class_example, truth, Class1, case_weights = weight)[[
      ".estimate"
    ]],
    py$case_weight$binary
  )

  # Multiclass, case-weighted macro average
  expect_equal(
    average_precision(
      hpc_cv,
      obs,
      VF:L,
      estimator = "macro",
      case_weights = weight
    )[[".estimate"]],
    py$case_weight$macro
  )

  # Multiclass, case-weighted macro-weighted average
  expect_equal(
    average_precision(
      hpc_cv,
      obs,
      VF:L,
      estimator = "macro_weighted",
      case_weights = weight
    )[[".estimate"]],
    py$case_weight$macro_weighted
  )
})

test_that("works with hardhat case weights", {
  df <- two_class_example
  n <- nrow(df)

  # Both hardhat weight classes must be accepted without error
  expect_no_error(
    average_precision_vec(
      df$truth,
      df$Class1,
      case_weights = hardhat::importance_weights(seq_len(n))
    )
  )

  expect_no_error(
    average_precision_vec(
      df$truth,
      df$Class1,
      case_weights = hardhat::frequency_weights(seq_len(n))
    )
  )
})

test_that("errors with class_pred input", {
  skip_if_not_installed("probably")

  # `class_pred` truth vectors are rejected with a snapshotted error.
  # The original also built an unused `fct_truth` with an injected NA;
  # those dead locals are removed. The snapshotted expression is kept
  # byte-identical so the stored snapshot still matches.
  cp_truth <- probably::as_class_pred(two_class_example$truth, which = 1)
  estimate <- two_class_example$Class1

  expect_snapshot(
    error = TRUE,
    average_precision_vec(cp_truth, estimate)
  )
})

test_that("na_rm argument check", {
  # A non-logical `na_rm` must error; the exact message is snapshotted
  expect_snapshot(
    error = TRUE,
    average_precision_vec(1, 1, na_rm = "yes")
  )
})

test_that("`event_level = 'second'` works", {
  df <- two_class_example

  df_rev <- df
  df_rev$truth <- stats::relevel(df_rev$truth, "Class2")

  expect_equal(
    average_precision_vec(df$truth, df$Class1),
    average_precision_vec(df_rev$truth, df_rev$Class1, event_level = "second")
  )
})

test_that("sklearn equivalent", {
  # Reference values precomputed with sklearn's average_precision_score().
  # The original read the same "py-average-precision" data twice — one
  # read is enough.
  py <- read_pydata("py-average-precision")

  # Binary estimate
  expect_equal(
    average_precision(two_class_example, truth, Class1)[[".estimate"]],
    py$binary
  )

  # Multiclass macro / macro-weighted estimates
  expect_equal(
    average_precision(hpc_cv, obs, VF:L, estimator = "macro")[[".estimate"]],
    py$macro
  )
  expect_equal(
    average_precision(hpc_cv, obs, VF:L, estimator = "macro_weighted")[[
      ".estimate"
    ]],
    py$macro_weighted
  )
})

test_that("known corner cases are correct", {
  # Single observation that IS the event (tp = 1): estimate is exactly 1.
  # first value - tp = 1
  truth <- factor("a", levels = c("a", "b"))
  estimate <- 0.9
  df <- data.frame(truth, estimate)

  expect_equal(
    average_precision(df, truth, estimate)$.estimate,
    1
  )

  # With the recall == 0 case precision value
  # defined to be precision == 1, we also expect
  # these to match pr_auc()

  expect_equal(
    average_precision(df, truth, estimate)$.estimate,
    pr_auc(df, truth, estimate)$.estimate
  )

  # Single observation that is NOT the event (fp = 1, zero true events):
  # the estimate is undefined, so we expect NaN plus a snapshotted warning.
  # first value - fp = 1, no `truth` events
  truth <- factor("b", levels = c("a", "b"))
  estimate <- 0.9
  df <- data.frame(truth, estimate)

  expect_snapshot(out <- average_precision(df, truth, estimate)$.estimate)
  expect_identical(out, NaN)

  # pr_auc() must degrade the same way in this corner case.
  # NOTE(review): `out` is recomputed here, apparently so each snapshotted
  # condition is captured independently — the snapshot count depends on it.
  # Same as pr_auc()
  expect_snapshot(out <- average_precision(df, truth, estimate)$.estimate)
  expect_snapshot(expect <- pr_auc(df, truth, estimate)$.estimate)
  expect_identical(out, expect)
})

test_that("range values are correct", {
  direction <- metric_direction(average_precision)
  range <- metric_range(average_precision)

  # Map the metric's declared range onto best/worst achievable values.
  # Scalar condition, so plain if/else instead of ifelse().
  if (direction == "minimize") {
    perfect <- range[1]
    worst <- range[2]
  } else {
    perfect <- range[2]
    worst <- range[1]
  }

  data <- tibble::tibble(
    truth = factor(c("a", "a", "a", "b", "b"), levels = c("a", "b")),
    perfect = c(1, 1, 1, 0, 0),
    off = c(0.5, 0.5, 0.5, 0.5, 0.5)
  )

  # A perfect classifier hits the best end of the range exactly
  expect_equal(average_precision_vec(data$truth, data$perfect), perfect)

  # An uninformative classifier lands strictly inside the range,
  # on the worse side of perfect
  off_estimate <- average_precision_vec(data$truth, data$off)

  if (direction == "minimize") {
    expect_gt(off_estimate, perfect)
    expect_lte(off_estimate, worst)
  }
  if (direction == "maximize") {
    expect_lt(off_estimate, perfect)
    expect_gte(off_estimate, worst)
  }
})

# Try the yardstick package in your browser.
#
# Any scripts or data that you put into this service are public.
#
# yardstick documentation built on April 8, 2026, 1:06 a.m.