# tests/testthat/test-variational.R

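# Setup: fit the MDM example data with the two variational models reused by all
# tests in this file -- a DINO/unconstrained model estimated with the cmdstanr
# backend and a DINA/independent model (with beta(5, 17) slip and guess priors)
# estimated with rstan. capture.output() plus suppressWarnings()/
# suppressMessages() keep the Stan progress output quiet during testing.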
if (!identical(Sys.getenv("NOT_CRAN"), "true")) {
  skip("No MCMC on CRAN")
} else {
  dino_spec <- dcm_specify(
    qmatrix = dcmdata::mdm_qmatrix,
    identifier = "item",
    measurement_model = dino(),
    structural_model = unconstrained()
  )
  dina_spec <- dcm_specify(
    qmatrix = dcmdata::mdm_qmatrix,
    identifier = "item",
    measurement_model = dina(),
    structural_model = independent(),
    priors = c(
      prior(beta(5, 17), type = "slip"),
      prior(beta(5, 17), type = "guess")
    )
  )

  out <- capture.output(
    suppressWarnings(suppressMessages(
      cmds_mdm_dino <- dcm_estimate(
        dino_spec,
        data = dcmdata::mdm_data,
        identifier = "respondent",
        missing = NA,
        method = "variational",
        seed = 63277,
        backend = "cmdstanr",
        draws = 1000
      )
    ))
  )
  out <- capture.output(
    suppressWarnings(suppressMessages(
      rstn_mdm_dina <- dcm_estimate(
        dina_spec,
        data = dcmdata::mdm_data,
        identifier = "respondent",
        missing = NA,
        method = "variational",
        seed = 63277,
        backend = "rstan"
      )
    ))
  )
}

# draws ------------------------------------------------------------------------
test_that("as_draws works", {
  skip_on_cran()

  draws <- as_draws(rstn_mdm_dina)
  expect_s3_class(draws, "draws_array")

  draws_a <- posterior::as_draws_array(rstn_mdm_dina)
  expect_s3_class(draws_a, "draws_array")

  draws_d <- posterior::as_draws_df(rstn_mdm_dina)
  expect_s3_class(draws_d, "draws_df")

  draws_l <- posterior::as_draws_list(cmds_mdm_dino)
  expect_s3_class(draws_l, "draws_list")

  draws_m <- posterior::as_draws_matrix(cmds_mdm_dino)
  expect_s3_class(draws_m, "draws_matrix")

  draws_r <- posterior::as_draws_rvars(cmds_mdm_dino)
  expect_s3_class(draws_r, "draws_rvars")
})

test_that("get_draws works as expected", {
  skip_on_cran()

  test_draws <- get_draws(cmds_mdm_dino)
  expect_equal(posterior::ndraws(test_draws), 1000)
  expect_equal(posterior::nvariables(test_draws), 22)
  expect_s3_class(test_draws, "draws_array")

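  # vars and ndraws should subset the returned variables and posterior draws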
  test_draws <- get_draws(rstn_mdm_dina, vars = c("log_Vc", "pi"), ndraws = 750)
  expect_equal(posterior::ndraws(test_draws), 750)
  expect_equal(posterior::nvariables(test_draws), 10)
  expect_s3_class(test_draws, "draws_array")
})

# extracts ---------------------------------------------------------------------
test_that("extract pi matrix", {
  dino_pimat <- measr_extract(cmds_mdm_dino, "pi_matrix")
  expect_equal(nrow(dino_pimat), 4)
  expect_equal(ncol(dino_pimat), 3)
  expect_equal(dino_pimat$item, dcmdata::mdm_qmatrix$item)
  expect_equal(
    colnames(dino_pimat)[-1],
    dplyr::pull(profile_labels(1), "class")
  )
  expect_true(all(vapply(dino_pimat[, -1], posterior::is_rvar, logical(1))))
  expect_true(all(vapply(dino_pimat[, -1], \(x) !any(is.na(x)), logical(1))))
})

test_that("extract model p-values", {
  dina_pvals <- measr_extract(rstn_mdm_dina, "exp_pvalues")
  expect_equal(nrow(dina_pvals), 4)
  expect_equal(ncol(dina_pvals), 4)
  expect_equal(dina_pvals$item, dcmdata::mdm_qmatrix$item)
  expect_equal(
    colnames(dina_pvals)[-1],
    c(dplyr::pull(profile_labels(1), "class"), "overall")
  )
  expect_true(all(vapply(dina_pvals[, -1], posterior::is_rvar, logical(1))))
  expect_true(all(vapply(dina_pvals[, -1], \(x) !any(is.na(x)), logical(1))))
})

# loglik -----------------------------------------------------------------------
test_that("loglik is calculated correctly", {
  skip_on_cran()

  cmds_log_lik <- loglik(cmds_mdm_dino)
  rstn_log_lik <- loglik(rstn_mdm_dina)

  # expected value from 2-class LCA fit in Mplus
  expect_equal(cmds_log_lik, -331.764, tolerance = 1.000)
  expect_equal(rstn_log_lik, -331.764, tolerance = 1.000)
})

# loo/waic ---------------------------------------------------------------------
test_that("loo and waic work", {
  skip_on_cran()

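  # rstn_dina and rstn_dino are presumably optimization-based fits created in a
  # helper file; loo() and waic() need posterior draws, so these calls should
  # error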
  err <- rlang::catch_cnd(loo(rstn_dina))
  expect_s3_class(err, "rlang_error")
  expect_match(err$message, "supports posterior distributions")

  err <- rlang::catch_cnd(waic(rstn_dino))
  expect_s3_class(err, "rlang_error")
  expect_match(err$message, "supports posterior distributions")

  check_loo <- loo(cmds_mdm_dino)
  expect_s3_class(check_loo, "psis_loo")

  check_waic <- waic(rstn_mdm_dina)
  expect_s3_class(check_waic, "waic")
})

test_that("loo and waic can be added to model", {
  skip_on_cran()

  loo_model <- add_criterion(cmds_mdm_dino, criterion = "loo")
  expect_equal(names(loo_model@criteria), "loo")
  expect_s3_class(loo_model@criteria$loo, "psis_loo")

  lw_model <- add_criterion(
    loo_model,
    criterion = c("loo", "waic"),
    overwrite = TRUE
  )
  expect_equal(names(lw_model@criteria), c("loo", "waic"))
  expect_s3_class(lw_model@criteria$loo, "psis_loo")
  expect_s3_class(lw_model@criteria$waic, "waic")
  expect_identical(loo_model@criteria$loo, lw_model@criteria$loo)

  expect_identical(measr_extract(lw_model, "loo"), lw_model@criteria$loo)
  expect_identical(measr_extract(lw_model, "waic"), lw_model@criteria$waic)

  expect_identical(lw_model@criteria$loo, loo(lw_model))
  expect_identical(lw_model@criteria$waic, waic(lw_model))
})

test_that("model comparisons work", {
  skip_on_cran()

  no_save <- loo_compare(cmds_mdm_dino, rstn_mdm_dina)
  expect_s3_class(no_save, "compare.loo")
  expect_equal(rownames(no_save), c("rstn_mdm_dina", "cmds_mdm_dino"))
  expect_equal(
    colnames(no_save),
    c(
      "elpd_diff",
      "se_diff",
      "elpd_loo",
      "se_elpd_loo",
      "p_loo",
      "se_p_loo",
      "looic",
      "se_looic"
    )
  )

  dino_compare <- add_criterion(cmds_mdm_dino, criterion = c("loo", "waic"))
  dino_save <- loo_compare(dino_compare, rstn_mdm_dina)
  expect_s3_class(dino_save, "compare.loo")
  expect_equal(rownames(dino_save), c("rstn_mdm_dina", "dino_compare"))
  expect_equal(
    colnames(dino_save),
    c(
      "elpd_diff",
      "se_diff",
      "elpd_loo",
      "se_elpd_loo",
      "p_loo",
      "se_p_loo",
      "looic",
      "se_looic"
    )
  )

  dina_compare <- add_criterion(rstn_mdm_dina, criterion = c("loo", "waic"))
  dina_save <- loo_compare(cmds_mdm_dino, dina_compare, criterion = "waic")
  expect_s3_class(dina_save, "compare.loo")
  expect_equal(rownames(dina_save), c("dina_compare", "cmds_mdm_dino"))
  expect_equal(
    colnames(dina_save),
    c(
      "elpd_diff",
      "se_diff",
      "elpd_waic",
      "se_elpd_waic",
      "p_waic",
      "se_p_waic",
      "waic",
      "se_waic"
    )
  )

  all_save <- loo_compare(dino_compare, dina_compare, criterion = "loo")
  expect_s3_class(all_save, "compare.loo")
  expect_equal(rownames(all_save), c("dina_compare", "dino_compare"))
  expect_equal(
    colnames(all_save),
    c(
      "elpd_diff",
      "se_diff",
      "elpd_loo",
      "se_elpd_loo",
      "p_loo",
      "se_p_loo",
      "looic",
      "se_looic"
    )
  )

  err <- rlang::catch_cnd(loo_compare(
    dino_compare,
    dina_compare,
    model_names = c("m1", "m2", "m3")
  ))
  expect_s3_class(err, "rlang_error")
  expect_match(err$message, "same as the number of models")

  err <- rlang::catch_cnd(loo_compare(dino_compare, no_save))
  expect_s3_class(err, "rlang_error")
  expect_match(err$message, "must be a .*measrdcm.* object")

  waic_comp <- loo_compare(
    dino_compare,
    dina_compare,
    criterion = "waic",
    model_names = c("first_model", "second_model")
  )
  expect_s3_class(waic_comp, "compare.loo")
  expect_equal(rownames(waic_comp), c("second_model", "first_model"))
  expect_equal(
    colnames(waic_comp),
    c(
      "elpd_diff",
      "se_diff",
      "elpd_waic",
      "se_elpd_waic",
      "p_waic",
      "se_p_waic",
      "waic",
      "se_waic"
    )
  )
})

# aic/bic ----------------------------------------------------------------------
test_that("aic and bic error", {
  err <- rlang::catch_cnd(aic(cmds_mdm_dino))
  expect_s3_class(err, "rlang_error")
  expect_match(err$message, "must be a model estimated with .*optim.*")

  err <- rlang::catch_cnd(aic(rstn_mdm_dina))
  expect_s3_class(err, "rlang_error")
  expect_match(err$message, "must be a model estimated with .*optim.*")

  err <- rlang::catch_cnd(bic(cmds_mdm_dino))
  expect_s3_class(err, "rlang_error")
  expect_match(err$message, "must be a model estimated with .*optim.*")

  err <- rlang::catch_cnd(bic(rstn_mdm_dina))
  expect_s3_class(err, "rlang_error")
  expect_match(err$message, "must be a model estimated with .*optim.*")
})

# bayes factors ----------------------------------------------------------------
test_that("log_mll works", {
  err <- rlang::catch_cnd(log_mll(rstn_dina))
  expect_s3_class(err, "rlang_error")
  expect_match(err$message, "must be a model estimated with")

  err <- rlang::catch_cnd(log_mll(cmds_mdm_dino))
  expect_s3_class(err, "rlang_error")
  expect_match(err$message, "must be a model estimated with")

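  # The marginal likelihood estimate is stochastic, so two fresh calls on the
  # same model should not be identical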
  expect_equal(typeof(log_mll(rstn_mdm_dina)), "double")
  expect_equal(length(log_mll(rstn_mdm_dina)), 1)
  expect_false(identical(log_mll(rstn_mdm_dina), log_mll(rstn_mdm_dina)))

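  # Once stored with add_criterion(), log_mll() should return the cached value;
  # force = TRUE recomputes it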
  store_mll <- rstn_mdm_dina
  expect_true(rlang::is_empty(store_mll@criteria$log_mll))
  store_mll <- add_criterion(store_mll, "log_mll")
  expect_false(rlang::is_empty(store_mll@criteria$log_mll))
  expect_equal(typeof(store_mll@criteria$log_mll), "double")
  expect_equal(length(store_mll@criteria$log_mll), 1)
  expect_identical(log_mll(store_mll), log_mll(store_mll))
  expect_false(identical(log_mll(store_mll), log_mll(store_mll, force = TRUE)))
})

test_that("bayes_factor works", {
  err <- rlang::catch_cnd(
    bayes_factor(rstn_mdm_dina, rstn_mdm_dina, prior_prob = "a")
  )
  expect_s3_class(err, "rlang_error")
  expect_match(err$message, "must be a number, not the string")

  err <- rlang::catch_cnd(
    bayes_factor(rstn_mdm_dina, "measrfit")
  )
  expect_s3_class(err, "rlang_error")
  expect_match(err$message, "must be a.*measrdcm.*object")

  err <- rlang::catch_cnd(
    bayes_factor(rstn_mdm_dina, "measrfit", "object")
  )
  expect_s3_class(err, "rlang_error")
  expect_match(err$message, "must all be.*measrdcm.*objects")

  err <- rlang::catch_cnd(
    bayes_factor(rstn_mdm_dina, rstn_mdm_dina, model_names = paste0("mod", 1:3))
  )
  expect_s3_class(err, "rlang_error")
  expect_match(err$message, "be of length 2, the same as the number of models")

  dina1 <- rstn_mdm_dina
  dina2 <- rstn_mdm_dina
  bf1 <- bayes_factor(dina1, dina2, prior_prob = NULL)
  expect_s3_class(bf1, "tbl_df")
  expect_equal(colnames(bf1), c("null_model", "alt_model", "bf"))
  expect_equal(nrow(bf1), 1)
  expect_equal(bf1$null_model, "dina1")
  expect_equal(bf1$alt_model, "dina2")
  expect_equal(typeof(bf1$bf), "double")
  expect_equal(bf1$bf, 1, tolerance = 0.1)

  dina3 <- rstn_mdm_dina
  bf2 <- bayes_factor(dina1, dina2, dina3, model_names = paste0("mod", 1:3))
  expect_s3_class(bf2, "tbl_df")
  expect_equal(
    colnames(bf2),
    c("null_model", "alt_model", "bf", "posterior_probs")
  )
  expect_equal(nrow(bf2), 3)
  expect_equal(bf2$null_model, c("mod1", "mod1", "mod2"))
  expect_equal(bf2$alt_model, c("mod2", "mod3", "mod3"))
  expect_equal(typeof(bf2$bf), "double")
  expect_equal(bf2$bf, rep(1, 3), tolerance = 0.1)
  expect_equal(typeof(bf2$posterior_probs), "list")
  for (i in seq_len(nrow(bf2))) {
    expect_equal(
      colnames(bf2$posterior_probs[[i]]),
      c("prior_prob_null", "posterior_prob_null")
    )
    expect_equal(
      bf2$posterior_probs[[i]]$prior_prob_null,
      seq(0.02, 0.98, by = 0.02)
    )
    expect_equal(
      bf2$posterior_probs[[i]]$posterior_prob_null,
      seq(0.02, 0.98, by = 0.02),
      tolerance = 0.1
    )
  }

  dina4 <- rstn_mdm_dina
  bf3 <- bayes_factor(dina1, dina2, dina3, dina4, prior_prob = seq(.1, .9, .1))
  expect_s3_class(bf3, "tbl_df")
  expect_equal(
    colnames(bf3),
    c("null_model", "alt_model", "bf", "posterior_probs")
  )
  expect_equal(nrow(bf3), 6)
  expect_equal(
    bf3$null_model,
    c("dina1", "dina1", "dina1", "dina2", "dina2", "dina3")
  )
  expect_equal(
    bf3$alt_model,
    c("dina2", "dina3", "dina4", "dina3", "dina4", "dina4")
  )
  expect_equal(typeof(bf3$bf), "double")
  expect_equal(bf3$bf, rep(1, 6), tolerance = 0.1)
  expect_equal(typeof(bf3$posterior_probs), "list")
  for (i in seq_len(nrow(bf3))) {
    expect_equal(
      colnames(bf3$posterior_probs[[i]]),
      c("prior_prob_null", "posterior_prob_null")
    )
    expect_equal(
      bf3$posterior_probs[[i]]$prior_prob_null,
      seq(0.1, 0.9, by = 0.1)
    )
    expect_equal(
      bf3$posterior_probs[[i]]$posterior_prob_null,
      seq(0.1, 0.9, by = 0.1),
      tolerance = 0.1
    )
  }
})

# ppmc -------------------------------------------------------------------------
test_that("ppmc works", {
  skip_on_cran()

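  # With the default arguments (no fit measures requested), fit_ppmc() should
  # return an empty list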
  test_ppmc <- fit_ppmc(cmds_mdm_dino)
  expect_equal(test_ppmc, list())

  # test 1 -----
  test_ppmc <- fit_ppmc(
    cmds_mdm_dino,
    ndraws = 500,
    return_draws = 100,
    model_fit = "raw_score",
    item_fit = "conditional_prob"
  )
  expect_equal(names(test_ppmc), c("ppmc_raw_score", "ppmc_conditional_prob"))

  expect_s3_class(test_ppmc$ppmc_raw_score, "tbl_df")
  expect_equal(nrow(test_ppmc$ppmc_raw_score), 1L)
  expect_equal(
    colnames(test_ppmc$ppmc_raw_score),
    c(
      "obs_chisq",
      "ppmc_mean",
      "2.5%",
      "97.5%",
      "rawscore_samples",
      "chisq_samples",
      "ppp"
    )
  )
  expect_equal(nrow(test_ppmc$ppmc_raw_score$rawscore_samples[[1]]), 100)
  expect_equal(length(test_ppmc$ppmc_raw_score$chisq_samples[[1]]), 100)

  expect_s3_class(test_ppmc$ppmc_conditional_prob, "tbl_df")
  expect_equal(nrow(test_ppmc$ppmc_conditional_prob), 8L)
  expect_equal(
    colnames(test_ppmc$ppmc_conditional_prob),
    c(
      "item",
      "class",
      "obs_cond_pval",
      "ppmc_mean",
      "2.5%",
      "97.5%",
      "samples",
      "ppp"
    )
  )
  expect_equal(
    as.character(test_ppmc$ppmc_conditional_prob$item),
    rep(paste0("mdm", 1:4), each = 2)
  )
  expect_equal(
    as.character(test_ppmc$ppmc_conditional_prob$class),
    rep(c("[0]", "[1]"), 4)
  )
  expect_equal(
    vapply(test_ppmc$ppmc_conditional_prob$samples, length, integer(1)),
    rep(100, 8)
  )

  # test 2 -----
  test_ppmc <- fit_ppmc(
    rstn_mdm_dina,
    ndraws = 200,
    return_draws = 180,
    probs = c(0.055, 0.945),
    item_fit = c("odds_ratio", "pvalue")
  )
  expect_equal(names(test_ppmc), c("ppmc_odds_ratio", "ppmc_pvalue"))
  expect_s3_class(test_ppmc$ppmc_odds_ratio, "tbl_df")
  expect_equal(nrow(test_ppmc$ppmc_odds_ratio), 6L)
  expect_equal(
    colnames(test_ppmc$ppmc_odds_ratio),
    c(
      "item_1",
      "item_2",
      "obs_or",
      "ppmc_mean",
      "5.5%",
      "94.5%",
      "samples",
      "ppp"
    )
  )
  expect_equal(
    as.character(test_ppmc$ppmc_odds_ratio$item_1),
    c(rep("mdm1", 3), rep("mdm2", 2), "mdm3")
  )
  expect_equal(
    as.character(test_ppmc$ppmc_odds_ratio$item_2),
    c("mdm2", "mdm3", "mdm4", "mdm3", "mdm4", "mdm4")
  )
  expect_equal(
    vapply(test_ppmc$ppmc_odds_ratio$samples, length, integer(1)),
    rep(180, 6)
  )

  expect_s3_class(test_ppmc$ppmc_pvalue, "tbl_df")
  expect_equal(nrow(test_ppmc$ppmc_pvalue), 4)
  expect_equal(
    colnames(test_ppmc$ppmc_pvalue),
    c("item", "obs_pvalue", "ppmc_mean", "5.5%", "94.5%", "samples", "ppp")
  )
  expect_equal(as.character(test_ppmc$ppmc_pvalue$item), paste0("mdm", 1:4))
  expect_equal(
    vapply(test_ppmc$ppmc_pvalue$samples, length, integer(1)),
    rep(180, 4)
  )

  # test 3 -----
  test_ppmc <- fit_ppmc(
    cmds_mdm_dino,
    ndraws = 1,
    return_draws = 0,
    model_fit = "raw_score",
    item_fit = c("conditional_prob", "odds_ratio", "pvalue")
  )
  expect_equal(
    names(test_ppmc),
    c(
      "ppmc_raw_score",
      "ppmc_conditional_prob",
      "ppmc_odds_ratio",
      "ppmc_pvalue"
    )
  )
  expect_equal(
    colnames(test_ppmc$ppmc_raw_score),
    c("obs_chisq", "ppmc_mean", "2.5%", "97.5%", "ppp")
  )
  expect_equal(
    colnames(test_ppmc$ppmc_conditional_prob),
    c("item", "class", "obs_cond_pval", "ppmc_mean", "2.5%", "97.5%", "ppp")
  )
  expect_equal(
    colnames(test_ppmc$ppmc_odds_ratio),
    c("item_1", "item_2", "obs_or", "ppmc_mean", "2.5%", "97.5%", "ppp")
  )
  expect_equal(
    colnames(test_ppmc$ppmc_pvalue),
    c("item", "obs_pvalue", "ppmc_mean", "2.5%", "97.5%", "ppp")
  )
})

test_that("model fit can be added", {
  skip_on_cran()

  test_model <- rstn_mdm_dina
  expect_equal(test_model@fit, list())

  # add m2 and ppmc odds ratios -----
  test_model <- add_fit(
    test_model,
    method = c("m2", "ppmc"),
    model_fit = NULL,
    item_fit = "odds_ratio",
    return_draws = 100
  )
  expect_equal(names(test_model@fit), c("m2", "ppmc_odds_ratio"))
  expect_equal(
    names(test_model@fit$ppmc_odds_ratio),
    c(
      "item_1",
      "item_2",
      "obs_or",
      "ppmc_mean",
      "2.5%",
      "97.5%",
      "samples",
      "ppp"
    )
  )
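  # fit_ppmc() should reuse the stored odds ratio results rather than
  # recomputing them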
  expect_identical(
    test_model@fit[-which(names(test_model@fit) == "m2")],
    fit_ppmc(test_model, item_fit = "odds_ratio")
  )

  # requesting nothing new should leave the stored fit unchanged -----
  test_model2 <- add_fit(test_model, method = "ppmc")
  expect_identical(test_model, test_model2)

  # now add raw score and conditional probs -- other fit should persist -----
  test_model <- add_fit(
    test_model,
    method = "ppmc",
    model_fit = "raw_score",
    item_fit = "conditional_prob",
    probs = c(0.055, 0.945)
  )
  expect_equal(
    names(test_model@fit),
    c("m2", "ppmc_odds_ratio", "ppmc_raw_score", "ppmc_conditional_prob")
  )
  expect_equal(
    names(test_model@fit$ppmc_raw_score),
    c("obs_chisq", "ppmc_mean", "5.5%", "94.5%", "ppp")
  )
  expect_equal(
    names(test_model@fit$ppmc_odds_ratio),
    c(
      "item_1",
      "item_2",
      "obs_or",
      "ppmc_mean",
      "2.5%",
      "97.5%",
      "samples",
      "ppp"
    )
  )
  expect_equal(
    names(test_model@fit$ppmc_conditional_prob),
    c("item", "class", "obs_cond_pval", "ppmc_mean", "5.5%", "94.5%", "ppp")
  )

  # now calculate conditional probs and overall pvalue - overall is new, -----
  # but conditional prob should use stored value
  test_ppmc <- fit_ppmc(
    test_model,
    model_fit = NULL,
    item_fit = c("conditional_prob", "pvalue")
  )
  expect_equal(names(test_ppmc), c("ppmc_conditional_prob", "ppmc_pvalue"))
  expect_identical(
    test_ppmc$ppmc_conditional_prob,
    test_model@fit$ppmc_conditional_prob
  )
  expect_equal(
    names(test_ppmc$ppmc_pvalue),
    c("item", "obs_pvalue", "ppmc_mean", "2.5%", "97.5%", "ppp")
  )

  # overwrite just conditional prob with samples and new probs -----
  # add overall p-values
  test_model <- add_fit(
    test_model,
    method = "ppmc",
    overwrite = TRUE,
    model_fit = NULL,
    item_fit = c("conditional_prob", "pvalue"),
    return_draws = 200,
    probs = c(.1, .9)
  )
  expect_equal(
    names(test_model@fit),
    c(
      "m2",
      "ppmc_odds_ratio",
      "ppmc_raw_score",
      "ppmc_conditional_prob",
      "ppmc_pvalue"
    )
  )
  expect_equal(
    names(test_model@fit$ppmc_raw_score),
    c("obs_chisq", "ppmc_mean", "5.5%", "94.5%", "ppp")
  )
  expect_equal(
    names(test_model@fit$ppmc_odds_ratio),
    c(
      "item_1",
      "item_2",
      "obs_or",
      "ppmc_mean",
      "2.5%",
      "97.5%",
      "samples",
      "ppp"
    )
  )
  expect_equal(
    names(test_model@fit$ppmc_conditional_prob),
    c(
      "item",
      "class",
      "obs_cond_pval",
      "ppmc_mean",
      "10%",
      "90%",
      "samples",
      "ppp"
    )
  )
  expect_equal(
    names(test_model@fit$ppmc_pvalue),
    c("item", "obs_pvalue", "ppmc_mean", "10%", "90%", "samples", "ppp")
  )

  # test extraction -----
  rs_check <- measr_extract(test_model, "ppmc_raw_score")
  expect_equal(rs_check, test_model@fit$ppmc_raw_score)

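  # The *_flags extracts keep rows whose ppp falls outside the central
  # ppmc_interval, e.g., ppmc_interval = 0.95 flags ppp <= 0.025 or ppp >= 0.975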
  cp_check <- measr_extract(test_model, "ppmc_conditional_prob")
  expect_equal(cp_check, test_model@fit$ppmc_conditional_prob)
  expect_equal(
    measr_extract(
      test_model,
      "ppmc_conditional_prob_flags",
      ppmc_interval = 0.95
    ),
    dplyr::filter(cp_check, ppp <= 0.025 | ppp >= 0.975)
  )
  expect_equal(
    measr_extract(
      test_model,
      "ppmc_conditional_prob_flags",
      ppmc_interval = 0.8
    ),
    dplyr::filter(cp_check, ppp <= 0.1 | ppp >= 0.9)
  )

  or_check <- measr_extract(test_model, "ppmc_odds_ratio")
  expect_equal(or_check, test_model@fit$ppmc_odds_ratio)
  expect_equal(
    measr_extract(test_model, "ppmc_odds_ratio_flags", ppmc_interval = 0.95),
    dplyr::filter(or_check, ppp <= 0.025 | ppp >= 0.975)
  )
  expect_equal(
    measr_extract(test_model, "ppmc_odds_ratio_flags", ppmc_interval = 0.8),
    dplyr::filter(or_check, ppp <= 0.1 | ppp >= 0.9)
  )

  pval_check <- measr_extract(test_model, "ppmc_pvalue")
  expect_equal(pval_check, test_model@fit$ppmc_pvalue)
  expect_equal(
    measr_extract(test_model, "ppmc_pvalue_flags", ppmc_interval = 0.95),
    dplyr::filter(pval_check, ppp <= 0.025 | ppp >= 0.975)
  )
  expect_equal(
    measr_extract(test_model, "ppmc_pvalue_flags", ppmc_interval = 0.6),
    dplyr::filter(pval_check, ppp <= 0.2 | ppp >= 0.8)
  )
})

# reliability ------------------------------------------------------------------
test_that("reliability works", {
  reli <- reliability(cmds_mdm_dino, threshold = 0.5)

  expect_equal(
    names(reli),
    c("pattern_reliability", "map_reliability", "eap_reliability")
  )
  expect_equal(names(reli$pattern_reliability), c("p_a", "p_c"))
  expect_equal(names(reli$map_reliability), c("accuracy", "consistency"))

  # column names ---------------------------------------------------------------
  expect_equal(
    names(reli$map_reliability$accuracy),
    c(
      "attribute",
      "acc",
      "lambda_a",
      "kappa_a",
      "youden_a",
      "tetra_a",
      "tp_a",
      "tn_a"
    )
  )
  expect_equal(
    names(reli$map_reliability$consistency),
    c(
      "attribute",
      "consist",
      "lambda_c",
      "kappa_c",
      "youden_c",
      "tetra_c",
      "tp_c",
      "tn_c",
      "gammak",
      "pc_prime"
    )
  )
  expect_equal(
    names(reli$eap_reliability),
    c("attribute", "rho_pf", "rho_bs", "rho_i", "rho_tb")
  )

  # row names ------------------------------------------------------------------
  expect_equal(reli$map_reliability$accuracy$attribute, "multiplication")
  expect_equal(reli$map_reliability$consistency$attribute, "multiplication")
  expect_equal(reli$eap_reliability$attribute, "multiplication")
})

# respondent scores ------------------------------------------------------------
test_that("respondent probabilities are correct", {
  skip_on_cran()

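  # score() with summary = TRUE returns long class/attribute probability
  # summaries with credible intervals; summary = FALSE returns one wide row of
  # probabilities per respondent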
  mdm_preds <- score(
    cmds_mdm_dino,
    newdata = dcmdata::mdm_data,
    identifier = "respondent",
    summary = TRUE
  )
  mdm_full_preds <- score(cmds_mdm_dino, summary = FALSE)

  # dimensions are correct -----
  expect_equal(
    names(mdm_preds),
    c("class_probabilities", "attribute_probabilities")
  )
  expect_equal(
    colnames(mdm_preds$class_probabilities),
    c("respondent", "class", "probability", "2.5%", "97.5%")
  )
  expect_equal(
    colnames(mdm_preds$attribute_probabilities),
    c("respondent", "attribute", "probability", "2.5%", "97.5%")
  )
  expect_equal(
    nrow(mdm_preds$class_probabilities),
    nrow(dcmdata::mdm_data) * (2^1)
  )
  expect_equal(
    nrow(mdm_preds$attribute_probabilities),
    nrow(dcmdata::mdm_data) * 1
  )

  expect_equal(
    names(mdm_full_preds),
    c("class_probabilities", "attribute_probabilities")
  )
  expect_equal(
    colnames(mdm_full_preds$class_probabilities),
    c("respondent", "[0]", "[1]")
  )
  expect_equal(
    colnames(mdm_full_preds$attribute_probabilities),
    c("respondent", "multiplication")
  )
  expect_equal(
    nrow(mdm_full_preds$class_probabilities),
    nrow(dcmdata::mdm_data)
  )
  expect_equal(
    nrow(mdm_full_preds$attribute_probabilities),
    nrow(dcmdata::mdm_data)
  )

  # extract works -----
  expect_equal(cmds_mdm_dino@respondent_estimates, list())

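  # add_respondent_estimates() stores the scoring results on the fitted object
  # so measr_extract() can return wide class/attribute probability tables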
  cmds_mdm_dino <- add_respondent_estimates(cmds_mdm_dino)
  expect_equal(cmds_mdm_dino@respondent_estimates, mdm_preds)
  expect_equal(
    measr_extract(cmds_mdm_dino, "class_prob"),
    mdm_preds$class_probabilities |>
      dplyr::select("respondent", "class", "probability") |>
      tidyr::pivot_wider(names_from = "class", values_from = "probability")
  )
  expect_equal(
    measr_extract(cmds_mdm_dino, "attribute_prob"),
    mdm_preds$attribute_probabilities |>
      dplyr::select("respondent", "attribute", "probability") |>
      tidyr::pivot_wider(names_from = "attribute", values_from = "probability")
  )
})
