# R/classification_metric.R

#' Class for computing metrics based on two BinaryLabelDatasets: the first contains the ground-truth labels and the second contains the predictions produced by a classification transformer (or similar).
#' @param dataset (BinaryLabelDataset) Dataset containing ground-truth labels
#' @param classified_dataset (BinaryLabelDataset) Dataset containing predictions
#' @param privileged_groups (list(list(list))) Privileged groups. Format is a list of `lists` where the keys are `protected_attribute_names` and the values are values in `protected_attributes`. Each `list` element describes a single group. See examples for more details.
#' @param unprivileged_groups (list(list(list))) Unprivileged groups in the same format as `privileged_groups`
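#' @examples
#' \dontrun{
#' # A minimal sketch, not a definitive recipe: assumes the Python backend is
#' # loaded and that `orig` and `pred` are BinaryLabelDatasets sharing a
#' # protected attribute named "sex" with 1 as the privileged value (dataset
#' # and attribute names here are hypothetical). Each group is a list element,
#' # per the format described above.
#' priv <- list(list("sex", 1))
#' unpriv <- list(list("sex", 0))
#' cm <- classification_metric(orig, pred,
#'                             unprivileged_groups = unpriv,
#'                             privileged_groups = priv)
#' accuracy(cm)
#' }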
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
classification_metric <- function(dataset,
                                  classified_dataset,
                                  unprivileged_groups=NULL,
                                  privileged_groups=NULL){
   if (is.null(unprivileged_groups)) {
      u_dict <- NULL
   } else {
      u_dict <- lapply(unprivileged_groups, py_dict_conv)
   }
   if (is.null(privileged_groups)) {
      p_dict <- NULL
   } else {
      p_dict <- lapply(privileged_groups, py_dict_conv)
   }

   cm <- py_suppress_warnings(metrics$ClassificationMetric(dataset,
                                                           classified_dataset,
                                                           u_dict,
                                                           p_dict))

   class(cm) <- c("classification_metric", class(cm))
   return(cm)

}

#' Compute the binary confusion matrix, i.e. the number of true/false positives/negatives, optionally conditioned on protected attributes
#' @param privileged (bool, optional) Boolean prescribing whether to condition this metric on the `privileged_groups`, if `TRUE`, or the `unprivileged_groups`, if `FALSE`. Defaults to `NULL` meaning this metric is computed over the entire dataset
#' @param class_metric metric class instance
#' @param dnn (character vector) Names for the dimensions of the returned matrix, in the order (reference, prediction); defaults to `c("Reference", "Prediction")`
#' @return (matrix) 2x2 confusion matrix with true labels on the rows and predicted labels on the columns (optionally conditioned)
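#' @examples
#' \dontrun{
#' # A sketch, assuming `cm` is a classification_metric object. Rows hold the
#' # reference (true) labels and columns the predicted labels.
#' mat <- binary_confusion_matrix(cm)
#' mat["1", "1"]  # true positives
#' sum(mat)       # total number of instances
#' }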
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
binary_confusion_matrix <- function(class_metric, privileged=NULL, dnn = c("Reference", "Prediction")){
   bcm <- py_suppress_warnings(class_metric$binary_confusion_matrix(privileged))
   mat <- matrix(c(bcm$TN, bcm$FN, bcm$FP, bcm$TP), nrow=2, ncol=2,
                 dimnames = list(c("0","1"), c("0","1")))
   names(dimnames(mat)) <- dnn
   return(mat)
}

#' Compute the generalized binary confusion matrix, i.e. the number of generalized true/false positives/negatives, optionally conditioned on protected attributes. Generalized counts are based on scores rather than on the hard predictions
#' @param privileged (bool, optional) Boolean prescribing whether to condition this metric on the `privileged_groups`, if `TRUE`, or the `unprivileged_groups`, if `FALSE`. Defaults to `NULL` meaning this metric is computed over the entire dataset
#' @param class_metric metric class instance
#' @param dnn (character vector) Names for the dimensions of the returned matrix, in the order (reference, prediction); defaults to `c("Reference", "Prediction")`
#' @return (matrix) 2x2 generalized confusion matrix with true labels on the rows and predicted labels on the columns (optionally conditioned)
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
generalized_binary_confusion_matrix <- function(class_metric, privileged=NULL, dnn = c("Reference", "Prediction")){
   gbcm <- py_suppress_warnings(class_metric$generalized_binary_confusion_matrix(privileged))
   mat <- matrix(c(gbcm$TN, gbcm$FN, gbcm$FP, gbcm$TP), nrow=2, ncol=2,
                 dimnames = list(c("0","1"), c("0","1")))
   names(dimnames(mat)) <- dnn
   return(mat)
}

#' Return the number of instances in the dataset where both the predicted and true labels are 'favorable' optionally conditioned on protected attributes
#' @param privileged (bool, optional) Boolean prescribing whether to condition this metric on the `privileged_groups`, if `TRUE`, or the `unprivileged_groups`, if `FALSE`. Defaults to `NULL` meaning this metric is computed over the entire dataset
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
num_true_positives <- function(class_metric, privileged=NULL){
   py_suppress_warnings(class_metric$num_true_positives(privileged))
}

#' Return the number of instances in the dataset where the predicted labels are 'favorable' but the true labels are 'unfavorable', optionally conditioned on protected attributes
#' @param privileged (bool, optional) Boolean prescribing whether to condition this metric on the `privileged_groups`, if `TRUE`, or the `unprivileged_groups`, if `FALSE`. Defaults to `NULL` meaning this metric is computed over the entire dataset
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
num_false_positives <- function(class_metric, privileged=NULL){
   py_suppress_warnings(class_metric$num_false_positives(privileged))
}

#' Return the number of instances in the dataset where the predicted labels are 'unfavorable' but the true labels are 'favorable', optionally conditioned on protected attributes
#' @param privileged (bool, optional) Boolean prescribing whether to condition this metric on the `privileged_groups`, if `TRUE`, or the `unprivileged_groups`, if `FALSE`. Defaults to `NULL` meaning this metric is computed over the entire dataset
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
num_false_negatives <- function(class_metric, privileged=NULL){
   py_suppress_warnings(class_metric$num_false_negatives(privileged))
}

#' Return the number of instances in the dataset where both the predicted and true labels are 'unfavorable' optionally conditioned on protected attributes
#' @param privileged (bool, optional) Boolean prescribing whether to condition this metric on the `privileged_groups`, if `TRUE`, or the `unprivileged_groups`, if `FALSE`. Defaults to `NULL` meaning this metric is computed over the entire dataset
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
num_true_negatives <- function(class_metric, privileged=NULL){
   py_suppress_warnings(class_metric$num_true_negatives(privileged))
}

#' Return the generalized number of true positives, the weighted sum of predicted scores where true labels are 'favorable', optionally conditioned on protected attributes
#' @param privileged (bool, optional) Boolean prescribing whether to condition this metric on the `privileged_groups`, if `TRUE`, or the `unprivileged_groups`, if `FALSE`. Defaults to `NULL` meaning this metric is computed over the entire dataset
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
num_generalized_true_positives <- function(class_metric, privileged=NULL){
   py_suppress_warnings(class_metric$num_generalized_true_positives(privileged))
}

#' Return the generalized number of false positives, the weighted sum of predicted scores where true labels are 'unfavorable', optionally conditioned on protected attributes
#' @param privileged (bool, optional) Boolean prescribing whether to condition this metric on the `privileged_groups`, if `TRUE`, or the `unprivileged_groups`, if `FALSE`. Defaults to `NULL` meaning this metric is computed over the entire dataset
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
num_generalized_false_positives <- function(class_metric, privileged=NULL){
   py_suppress_warnings(class_metric$num_generalized_false_positives(privileged))
}

#' Return the generalized number of false negatives, the weighted sum of 1 - predicted scores where the true labels are 'favorable', optionally conditioned on protected attributes
#' @param privileged (bool, optional) Boolean prescribing whether to condition this metric on the `privileged_groups`, if `TRUE`, or the `unprivileged_groups`, if `FALSE`. Defaults to `NULL` meaning this metric is computed over the entire dataset
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
num_generalized_false_negatives <- function(class_metric, privileged=NULL){
   py_suppress_warnings(class_metric$num_generalized_false_negatives(privileged))
}

#' Return the generalized number of true negatives, the weighted sum of 1 - predicted scores where the true labels are 'unfavorable', optionally conditioned on protected attributes
#' @param privileged (bool, optional) Boolean prescribing whether to condition this metric on the `privileged_groups`, if `TRUE`, or the `unprivileged_groups`, if `FALSE`. Defaults to `NULL` meaning this metric is computed over the entire dataset
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
num_generalized_true_negatives <- function(class_metric, privileged=NULL){
   py_suppress_warnings(class_metric$num_generalized_true_negatives(privileged))
}

#' Compute various performance measures on the dataset, optionally conditioned on protected attributes
#' @param privileged (bool, optional) Boolean prescribing whether to condition this metric on the `privileged_groups`, if `TRUE`, or the `unprivileged_groups`, if `FALSE`. Defaults to `NULL` meaning this metric is computed over the entire dataset
#' @param class_metric metric class instance
#' @return (list) True positive rate, true negative rate, false positive rate, false negative rate, positive predictive value, negative predictive value, false discovery rate, false omission rate, and accuracy (optionally conditioned)
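#' @examples
#' \dontrun{
#' # A sketch, assuming `cm` is a classification_metric object. The result is
#' # a named list whose names mirror the underlying AIF360 keys (e.g. "TPR",
#' # "ACC").
#' pm <- performance_measures(cm)
#' pm$ACC  # accuracy over the entire dataset
#' }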
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
performance_measures <- function(class_metric, privileged=NULL){
   py_suppress_warnings(class_metric$performance_measures(privileged))
}

#' Return the ratio of true positives to positive examples in the dataset optionally conditioned on protected attributes
#' @param privileged (bool, optional) Boolean prescribing whether to condition this metric on the `privileged_groups`, if `TRUE`, or the `unprivileged_groups`, if `FALSE`. Defaults to `NULL` meaning this metric is computed over the entire dataset
#' @param class_metric metric class instance
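#' @examples
#' \dontrun{
#' # A sketch, assuming `cm` was built with both group arguments supplied.
#' true_positive_rate(cm)                     # entire dataset
#' true_positive_rate(cm, privileged = TRUE)  # privileged groups only
#' true_positive_rate(cm, privileged = FALSE) # unprivileged groups only
#' }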
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
true_positive_rate <- function(class_metric, privileged=NULL){
   py_suppress_warnings(class_metric$true_positive_rate(privileged))
}

#' Return the ratio of false positives to negative examples in the dataset optionally conditioned on protected attributes
#' @param privileged (bool, optional) Boolean prescribing whether to condition this metric on the `privileged_groups`, if `TRUE`, or the `unprivileged_groups`, if `FALSE`. Defaults to `NULL` meaning this metric is computed over the entire dataset
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
false_positive_rate <- function(class_metric, privileged=NULL){
   py_suppress_warnings(class_metric$false_positive_rate(privileged))
}

#' Return the ratio of false negatives to positive examples in the dataset optionally conditioned on protected attributes
#' @param privileged (bool, optional) Boolean prescribing whether to condition this metric on the `privileged_groups`, if `TRUE`, or the `unprivileged_groups`, if `FALSE`. Defaults to `NULL` meaning this metric is computed over the entire dataset
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
false_negative_rate <- function(class_metric, privileged=NULL){
   py_suppress_warnings(class_metric$false_negative_rate(privileged))
}

#' Return the ratio of true negatives to negative examples in the dataset optionally conditioned on protected attributes
#' @param privileged (bool, optional) Boolean prescribing whether to condition this metric on the `privileged_groups`, if `TRUE`, or the `unprivileged_groups`, if `FALSE`. Defaults to `NULL` meaning this metric is computed over the entire dataset
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
true_negative_rate <- function(class_metric, privileged=NULL){
   py_suppress_warnings(class_metric$true_negative_rate(privileged))
}

#' Return the ratio of generalized true positives to positive examples in the dataset optionally conditioned on protected attributes
#' @param privileged (bool, optional) Boolean prescribing whether to condition this metric on the `privileged_groups`, if `TRUE`, or the `unprivileged_groups`, if `FALSE`. Defaults to `NULL` meaning this metric is computed over the entire dataset
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
generalized_true_positive_rate <- function(class_metric, privileged=NULL){
   py_suppress_warnings(class_metric$generalized_true_positive_rate(privileged))
}

#' Return the ratio of generalized false positives to negative examples in the dataset optionally conditioned on protected attributes
#' @param privileged (bool, optional) Boolean prescribing whether to condition this metric on the `privileged_groups`, if `TRUE`, or the `unprivileged_groups`, if `FALSE`. Defaults to `NULL` meaning this metric is computed over the entire dataset
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
generalized_false_positive_rate <- function(class_metric, privileged=NULL){
   py_suppress_warnings(class_metric$generalized_false_positive_rate(privileged))
}

#' Return the ratio of generalized false negatives to positive examples in the dataset optionally conditioned on protected attributes
#' @param privileged (bool, optional) Boolean prescribing whether to condition this metric on the `privileged_groups`, if `TRUE`, or the `unprivileged_groups`, if `FALSE`. Defaults to `NULL` meaning this metric is computed over the entire dataset
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
generalized_false_negative_rate <- function(class_metric, privileged=NULL){
   py_suppress_warnings(class_metric$generalized_false_negative_rate(privileged))
}

#' Return the ratio of generalized true negatives to negative examples in the dataset optionally conditioned on protected attributes
#' @param privileged (bool, optional) Boolean prescribing whether to condition this metric on the `privileged_groups`, if `TRUE`, or the `unprivileged_groups`, if `FALSE`. Defaults to `NULL` meaning this metric is computed over the entire dataset
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
generalized_true_negative_rate <- function(class_metric, privileged=NULL){
   py_suppress_warnings(class_metric$generalized_true_negative_rate(privileged))
}

#' Return the ratio of true positives to positive predictions in the dataset optionally conditioned on protected attributes
#' @param privileged (bool, optional) Boolean prescribing whether to condition this metric on the `privileged_groups`, if `TRUE`, or the `unprivileged_groups`, if `FALSE`. Defaults to `NULL` meaning this metric is computed over the entire dataset
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
positive_predictive_value <- function(class_metric, privileged=NULL){
   py_suppress_warnings(class_metric$positive_predictive_value(privileged))
}

#' Return the ratio of false positives to positive predictions in the dataset optionally conditioned on protected attributes
#' @param privileged (bool, optional) Boolean prescribing whether to condition this metric on the `privileged_groups`, if `TRUE`, or the `unprivileged_groups`, if `FALSE`. Defaults to `NULL` meaning this metric is computed over the entire dataset
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
false_discovery_rate <- function(class_metric, privileged=NULL){
   py_suppress_warnings(class_metric$false_discovery_rate(privileged))
}

#' Return the ratio of false negatives to negative predictions in the dataset optionally conditioned on protected attributes
#' @param privileged (bool, optional) Boolean prescribing whether to condition this metric on the `privileged_groups`, if `TRUE`, or the `unprivileged_groups`, if `FALSE`. Defaults to `NULL` meaning this metric is computed over the entire dataset
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
false_omission_rate <- function(class_metric, privileged=NULL){
   py_suppress_warnings(class_metric$false_omission_rate(privileged))
}

#' Return the ratio of true negatives to negative predictions in the dataset optionally conditioned on protected attributes
#' @param privileged (bool, optional) Boolean prescribing whether to condition this metric on the `privileged_groups`, if `TRUE`, or the `unprivileged_groups`, if `FALSE`. Defaults to `NULL` meaning this metric is computed over the entire dataset
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
negative_predictive_value <- function(class_metric, privileged=NULL){
   py_suppress_warnings(class_metric$negative_predictive_value(privileged))
}

#' Return the ratio of correct predictions to total examples in the dataset optionally conditioned on protected attributes
#' @param privileged (bool, optional) Boolean prescribing whether to condition this metric on the `privileged_groups`, if `TRUE`, or the `unprivileged_groups`, if `FALSE`. Defaults to `NULL` meaning this metric is computed over the entire dataset
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
accuracy <- function(class_metric, privileged=NULL){
   py_suppress_warnings(class_metric$accuracy(privileged))
}

#' Return the ratio of incorrect predictions to total examples in the dataset optionally conditioned on protected attributes
#' @param privileged (bool, optional) Boolean prescribing whether to condition this metric on the `privileged_groups`, if `TRUE`, or the `unprivileged_groups`, if `FALSE`. Defaults to `NULL` meaning this metric is computed over the entire dataset
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
error_rate <- function(class_metric, privileged=NULL){
   py_suppress_warnings(class_metric$error_rate(privileged))
}

#' Return the difference in true positive rates between the unprivileged and privileged groups (unprivileged minus privileged)
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
true_positive_rate_difference <- function(class_metric){
   py_suppress_warnings(class_metric$true_positive_rate_difference())
}

#' Return the difference in false positive rates between the unprivileged and privileged groups (unprivileged minus privileged)
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
false_positive_rate_difference <- function(class_metric){
   py_suppress_warnings(class_metric$false_positive_rate_difference())
}

#' Return the difference in false negative rates between the unprivileged and privileged groups (unprivileged minus privileged)
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
false_negative_rate_difference <- function(class_metric){
   py_suppress_warnings(class_metric$false_negative_rate_difference())
}

#' Return the difference in false omission rates between the unprivileged and privileged groups (unprivileged minus privileged)
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
false_omission_rate_difference <- function(class_metric){
   py_suppress_warnings(class_metric$false_omission_rate_difference())
}

#' Return the difference in false discovery rates between the unprivileged and privileged groups (unprivileged minus privileged)
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
false_discovery_rate_difference <- function(class_metric){
   py_suppress_warnings(class_metric$false_discovery_rate_difference())
}

#' Return the ratio of the unprivileged group false positive rate to privileged group false positive rate
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
false_positive_rate_ratio <- function(class_metric){
   py_suppress_warnings(class_metric$false_positive_rate_ratio())
}

#' Return the ratio of the unprivileged group false negative rate to privileged group false negative rate
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
false_negative_rate_ratio <- function(class_metric){
   py_suppress_warnings(class_metric$false_negative_rate_ratio())
}

#' Return the ratio of the unprivileged group false omission rate to privileged group false omission rate
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
false_omission_rate_ratio <- function(class_metric){
   py_suppress_warnings(class_metric$false_omission_rate_ratio())
}

#' Return the ratio of the unprivileged group false discovery rate to privileged group false discovery rate
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
false_discovery_rate_ratio <- function(class_metric){
   py_suppress_warnings(class_metric$false_discovery_rate_ratio())
}

#' Average of the differences in FPR and TPR between the unprivileged and privileged groups, i.e. ((FPR_unpriv - FPR_priv) + (TPR_unpriv - TPR_priv)) / 2. A value of 0 indicates equality of odds
#' @param class_metric metric class instance
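#' @examples
#' \dontrun{
#' # A sketch, assuming `cm` was built with both group arguments supplied.
#' # Equivalent, up to floating point, to averaging the two rate differences:
#' (false_positive_rate_difference(cm) + true_positive_rate_difference(cm)) / 2
#' average_odds_difference(cm)
#' }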
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
average_odds_difference <- function(class_metric){
   py_suppress_warnings(class_metric$average_odds_difference())
}

#' Average of the absolute differences in FPR and TPR between the unprivileged and privileged groups, i.e. (|FPR_unpriv - FPR_priv| + |TPR_unpriv - TPR_priv|) / 2. A value of 0 indicates equality of odds
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
average_abs_odds_difference <- function(class_metric){
   py_suppress_warnings(class_metric$average_abs_odds_difference())
}

#' Difference in error rates between the unprivileged and privileged groups (unprivileged minus privileged)
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
error_rate_difference <- function(class_metric){
   py_suppress_warnings(class_metric$error_rate_difference())
}

#' Ratio of the unprivileged group error rate to the privileged group error rate
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
error_rate_ratio <- function(class_metric){
   py_suppress_warnings(class_metric$error_rate_ratio())
}

#' The number of positive predictions in the dataset
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
num_pred_positives <- function(class_metric){
   py_suppress_warnings(class_metric$num_pred_positives())
}

#' The number of negative predictions in the dataset
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
num_pred_negatives <- function(class_metric){
   py_suppress_warnings(class_metric$num_pred_negatives())
}

#' Return the ratio of positive predictions to total examples in the dataset, optionally conditioned on protected attributes
#' @param class_metric metric class instance
#' @param privileged (bool, optional) Boolean prescribing whether to condition this metric on the `privileged_groups`, if `TRUE`, or the `unprivileged_groups`, if `FALSE`. Defaults to `NULL` meaning this metric is computed over the entire dataset
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
selection_rate <- function(class_metric, privileged=NULL){
   py_suppress_warnings(class_metric$selection_rate(privileged))
}

#' Compute the disparate impact, the ratio of the unprivileged group selection rate to the privileged group selection rate
#' @param class_metric metric class instance
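#' @examples
#' \dontrun{
#' # A sketch, assuming `cm` was built with both group arguments supplied.
#' # A value of 1 indicates parity; the common "80% rule" flags values
#' # below 0.8.
#' classification_disparate_impact(cm)
#' }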
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
classification_disparate_impact <- function(class_metric){
   py_suppress_warnings(class_metric$disparate_impact())
}

#' Compute the statistical parity difference, the difference in selection rates between the unprivileged and privileged groups (unprivileged minus privileged)
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
classification_statistical_parity_difference <- function(class_metric){
   py_suppress_warnings(class_metric$statistical_parity_difference())
}

#' Generalized entropy index is proposed as a unified individual and group fairness measure
#' @param alpha (int) Parameter that regulates the weight given to distances between values at different parts of the distribution
#' @param class_metric metric class instance
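#' @examples
#' \dontrun{
#' # A sketch, assuming `cm` is a classification_metric object.
#' generalized_entropy_index(cm)             # default alpha = 2
#' generalized_entropy_index(cm, alpha = 1)  # equals theil_index(cm)
#' }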
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
generalized_entropy_index <- function(class_metric, alpha=2){
   py_suppress_warnings(class_metric$generalized_entropy_index(alpha))
}

#' Between-group generalized entropy index that uses all combinations of groups based on the dataset's protected attributes
#' @param alpha (int) Parameter that regulates the weight given to distances between values at different parts of the distribution
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
between_all_groups_generalized_entropy_index <- function(class_metric, alpha=2){
   py_suppress_warnings(class_metric$between_all_groups_generalized_entropy_index(alpha))
}

#' Between-group generalized entropy index that uses the `privileged_groups` and `unprivileged_groups` supplied to `classification_metric` as the only two groups
#' @param alpha (int) Parameter that regulates the weight given to distances between values at different parts of the distribution
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
between_group_generalized_entropy_index <- function(class_metric, alpha=2){
   py_suppress_warnings(class_metric$between_group_generalized_entropy_index(alpha))
}

#' The Theil index is the `generalized_entropy_index` with `alpha = 1`
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
theil_index <- function(class_metric){
   py_suppress_warnings(class_metric$theil_index())
}

#' The coefficient of variation is two times the square root of the `generalized_entropy_index` with `alpha = 2`
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
coefficient_of_variation <- function(class_metric){
   py_suppress_warnings(class_metric$coefficient_of_variation())
}

#' The between-group Theil index is the `between_group_generalized_entropy_index` with `alpha = 1`
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
between_group_theil_index <- function(class_metric){
   py_suppress_warnings(class_metric$between_group_theil_index())
}

#' The between-group coefficient of variation is two times the square root of the `between_group_generalized_entropy_index` with `alpha = 2`
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
between_group_coefficient_of_variation <- function(class_metric){
   py_suppress_warnings(class_metric$between_group_coefficient_of_variation())
}

#' The between-all-groups Theil index is the `between_all_groups_generalized_entropy_index` with `alpha = 1`
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
between_all_groups_theil_index <- function(class_metric){
   py_suppress_warnings(class_metric$between_all_groups_theil_index())
}

#' The between-all-groups coefficient of variation is two times the square root of the `between_all_groups_generalized_entropy_index` with `alpha = 2`
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
between_all_groups_coefficient_of_variation <- function(class_metric){
   py_suppress_warnings(class_metric$between_all_groups_coefficient_of_variation())
}

#' Alias of `true_positive_rate_difference`
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
equal_opportunity_difference <- function(class_metric){
   py_suppress_warnings(class_metric$equal_opportunity_difference())
}

#' Alias of `num_true_positives`
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
power <- function(class_metric){
   py_suppress_warnings(class_metric$power())
}

#' Alias of `positive_predictive_value`
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
precision <- function(class_metric){
   py_suppress_warnings(class_metric$precision())
}

#' Alias of `true_positive_rate`
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
recall <- function(class_metric){
   py_suppress_warnings(class_metric$recall())
}

#' Alias of `true_positive_rate`
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
sensitivity <- function(class_metric){
   py_suppress_warnings(class_metric$sensitivity())
}

#' Alias of `true_negative_rate`
#' @param class_metric metric class instance
#' @export
#' @importFrom reticulate py_suppress_warnings
#'
specificity <- function(class_metric){
   py_suppress_warnings(class_metric$specificity())
}