# tests/testthat/test_Assesser.R

context("Test Assesser and eval_function objects")

# Reference evaluation frame: the expected value that get_eval_frame() and
# assess_model() results are compared against in the tests below.
test_eval_frame <- get_test_eval_frame()

# Count rows whose rounded prediction exactly equals the target.
# Expects `df` to carry numeric `target` and `prediction` columns.
simple_eval_fun <- function(df) {
  hits <- df$target == round(df$prediction)
  sum(hits)
}
# Wrap the plain function in the package's eval_function class so the
# Assesser can apply it.
evaluation <- eval_function(simple_eval_fun)

# Build and configure the Assesser under test: point it at the shared test
# input frame, then declare which columns hold predictions, segments and
# targets. NOTE(review): this object is module-level mutable state shared by
# every test_that() block below — test order matters.
test_input_frame <- get_test_input_frame()
my_assesser <- Assesser$new(test_data = test_input_frame, id_names = ".id")
my_assesser$set_predictions(prediction_names = c("model_1", "model_2"))
my_assesser$set_segments(segment_names = "segment")
my_assesser$set_targets(target_names = "target")
my_assesser$evaluation_funs <- evaluation

test_that("Assesser private method get_eval_frame works as expected", {
  # With no additional fields the private eval frame must match the
  # reference frame (ordered by id).
  expected_frame <- dplyr::arrange(test_eval_frame, .id)
  expect_equal(
    get_private(my_assesser)$get_eval_frame(additional_fields = NULL),
    expected_frame
  )

  # Append a constant column to the test data, then request that it be
  # carried through into the eval frame.
  # NOTE(review): this mutates shared module-level state that the next
  # test_that() block also reads — presumably harmless; verify.
  my_assesser$test_data <- dplyr::mutate(
    my_assesser$test_data,
    add_column = TRUE
  )
  frame_with_extra <- get_private(my_assesser)$get_eval_frame(
    additional_fields = "add_column"
  )

  expect_true("add_column" %in% names(frame_with_extra))
  expect_true(all(frame_with_extra$add_column))
})

test_that("Assesser assesses properly with simple evaluation function", {
  # The performance frame produced by the Assesser must agree with applying
  # the evaluation function directly to the reference eval frame.
  result <- my_assesser$assess_model()
  expect_equal(
    result[["performance_frame"]],
    assess_eval_frame(evaluation, test_eval_frame)
  )
})
# Source: signaux-faibles/MLsegmentr documentation, built on Aug. 29, 2019, 2:22 p.m.