R/utils.R

# Functions **********************************************************####
## Exported functions ************************************************####
#' @name get.scales.names
#' @export
#' @title Get Scales Names
#' @description This function grabs the names of the scale columns to be used as predictors in the multifit function or any other analysis.
#' @param x a data.frame generated by [lsm2multifit()]
#' @usage get.scales.names(x)
#' @examples
#' \dontrun{
#' dec        <- decouple(r,p,c(1000,2000,3000))
#' lsmdata    <- calc_lsm(dec, level = "landscape", metric = "shdi")
#' data.mfit  <- lsm2multifit(lsmdata, biodata=euglossini,
#'                            level = "landscape", metrics="shdi")
#' get.scales.names(data.mfit)
#' }
get.scales.names <- function(x){
  out <- names(x)[grepl("X\\d+", names(x))]
  return(out)
}
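
# A purely illustrative, commented-out sketch (the toy column names below are
# hypothetical): get.scales.names() keeps only the columns whose names match
# "X" followed by digits, which is how the scale columns of a data frame
# generated by lsm2multifit() are expected to be named.
# toy <- data.frame(site = "s1", X1000 = 0.8, X2000 = 1.1, richness = 12)
# get.scales.names(toy)
# #> [1] "X1000" "X2000"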

# Internal Functions ####
#' @title Widen long tables by scales (layers)
#' @description This is an internal function that converts long-format tables
#' to wide tables with one column per scale, as expected
#' by the multifit function
#' @name wide_lsm
#' @param data a data frame from [extract_metrics()] or [calc_lsm()]
#' @param class which class you want to keep. If empty, the first value will be taken. If you pass an invalid value, the nearest match will be taken.
#' @param metric which metric you want to keep. If empty, the first value will be taken. If invalid, the nearest match will be taken.
#' @noRd
wide_lsm <- function(data=NULL, class=NULL, metric=NULL){
  if(is.null(data)) {
    stop("No data provided. Check your data!")
  } else if(!is.data.frame(data)) {
    stop("\n Your data seems not to be a data frame. \n Run 'calc_lsm' or 'extract_metrics' functions")
  }
  if(is.null(metric)){
    selected_metric <- levels(as.factor(data$metric))[1]
    warning(paste0("You did not specify a metric. The first one was taken:\n metric = '", selected_metric, "'"))
  } else {
    if(metric %in% data$metric) selected_metric <- metric
    else {
      selected_metric <- agrep(metric, data$metric, value = TRUE)[1]
      if(is.na(selected_metric)) stop(paste0("The metric '", metric, "' was not found in the data."))
      warning(paste0("\n\t You provided the metric: ", metric, "\n\t",
                     " but it was not found, so the nearest match was taken: ", selected_metric))
    }
  }
  if(is.null(class)){
    selected_class <- levels(as.factor(data$class))[1]
    warning(paste0("You did not specify a class. The first one was taken:\n class = '", selected_class, "'"))
  } else {
    if(class %in% data$class) selected_class <- class
    else {
      selected_class <- agrep(class, data$class, value = TRUE)[1]
      if(is.na(selected_class)) stop(paste0("The class '", class, "' was not found in the data."))
      warning(paste0("\n\t You provided the class: ", class, "\n\t",
                     " but it was not found, so the nearest match was taken: ", selected_class))
    }
  }
  data <- data %>%
    dplyr::filter(class == selected_class) %>%
    dplyr::filter(metric == selected_metric)
  tidyr::pivot_wider(data, id_cols = "site", names_from = "layer", values_from = "value")
}
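
# A purely illustrative, commented-out sketch (toy values, not real package
# output): wide_lsm() keeps one class and one metric from the long table and
# spreads the "layer" column, giving one column per scale filled with "value".
# long <- data.frame(site   = rep(c("s1", "s2"), each = 2),
#                    layer  = rep(c(1, 2), times = 2),
#                    class  = 1,
#                    metric = "shdi",
#                    value  = c(0.4, 0.7, 0.5, 0.9))
# wide_lsm(long, class = 1, metric = "shdi")
# #> one row per site, with columns "1" and "2" holding the shdi values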
## To be deprecated in future versions ######
#' @name summarize_classes
#' @title Summarize classes
#' @keywords internal
#' @description We often want to aggregate two or more classes.
#' This function takes the output from [extract_metrics()] or [calc_lsm()] and
#' summarizes any given classes with a user-provided function.
#' @param x an output from [extract_metrics()] or [calc_lsm()]
#' @param vals the values of the classes that should be summarized
#' @param var the metric to be summarized
#' @param fun a function to summarize the metric. Popular functions are "sum", "mean", etc.
#' Users can also use custom functions such as: function(x){log(x+1)}
#' @seealso [extract_metrics()] [calc_lsm()] [summarize_scales()]
#' @noRd
summarize_classes <- function(x, vals, var="percent", fun="sum"){
  sites   = unique(x$site)
  layer   = unique(x$layer)
  classes = unique(x$class)

  all.comb <- expand.grid("class"= sort(classes), "layer" = layer, "site"=sites)

  td <- merge(x, all.comb, all=TRUE)
  td[is.na(td)] <- 0

  exp <- td$class %in% vals
  fd <- td[exp,]
  fd <- fd[fd$metrics == var,]

  sum.data <- aggregate(value ~ site+layer, data=fd, FUN=fun)
  sum.data <- sum.data[order(sum.data$site),]
  names(sum.data) <- c(names(sum.data)[1:2], var)

  return(sum.data)
}

#' @name summarize_scales
#' @keywords internal
#' @title Summarize scales
#' @description This function takes the output from [extract_metrics()] or [calc_lsm()] and
#' summarizes a given metric across scales with a user-provided function.
#' Popular functions are "mean", "sum", etc.
#' @param x an output from [extract_metrics()] or [calc_lsm()]
#' @param metric the metric to be summarized
#' @param fun a function to summarize the metric. Popular functions are "sum", "mean".
#' Users can also use custom functions such as: function(x){log(x+1)}
#' @seealso [extract_metrics()] [calc_lsm()] [summarize_classes()]
#' @noRd
summarize_scales <- function(x, metric, fun="mean"){
  sites   = unique(x$site)
  layer   = unique(x$layer)
  classes = unique(x$class)

  all.comb <- expand.grid("class"= sort(classes), "layer" = layer, "site"=sites)

  td <- merge(x, all.comb, all=TRUE)
  td[is.na(td)] <- 0

  fd <- td[td$metrics == metric,]

  sum.data <- aggregate(value ~ site+layer, data=fd, FUN=fun)
  sum.data <- sum.data[order(sum.data$site),]
  names(sum.data) <- c(names(sum.data)[1:2], metric)

  return(sum.data)
}
## Functions in use ####
### Plotting related functions ####
#' @name compute_breaks
#' @description Extracts the unique cell values of a Raster* object, or of a
#' list of Raster* objects, to be used as breaks when plotting.
#' @noRd
#' @keywords internal
compute_breaks <- function(x){
  if(is.list(x)){
    # list of Raster* objects: gather the cell values of every element
    temp <- lapply(unlist(x), raster::getValues)
    temp <- as.numeric(levels(as.factor(unlist(temp))))
  } else if(grepl("Raster", class(x)[1])){
    # single Raster* object: keep its unique cell values
    temp <- as.numeric(levels(as.factor(raster::getValues(x))))
  } else {
    stop("x must be a Raster* object or a list of Raster* objects")
  }
  return(temp)
}
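
# A purely illustrative, commented-out sketch (toy raster, not package data):
# compute_breaks() returns the sorted unique cell values of the input, e.g.
# the class codes of a categorical landscape raster.
# r <- raster::raster(matrix(c(1, 1, 2, 3), nrow = 2))
# compute_breaks(r)
# #> [1] 1 2 3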


### Other utility functions ####
#' @name sum.classes
#' @title Summarize classes
#' @description We often want to aggregate two or more classes.
#' This function takes the output from extract_metrics or calc_lsm and
#' summarizes any given classes with a user-provided function.
#' @param x an output from extract_metrics or calc_lsm
#' @param vals the values of the classes that should be summarized
#' @param var the metric to be summarized
#' @param fun a function to summarize the metric. Popular functions are "sum", "mean", etc.
#' Users can also use custom functions such as: function(x){log(x+1)}
#' @seealso extract_metrics calc_lsm sum.scales
#' @noRd
sum.classes <- function(x, vals, var="percent", fun="sum"){
  sites   = unique(x$site)
  scales  = unique(x$scale)
  classes = unique(x$class)

  all.comb <- expand.grid("class"= sort(classes), "scale" = scales, "site"=sites)

  td <- merge(x, all.comb, all=TRUE)
  td[is.na(td)] <- 0

  exp <- td$class %in% vals
  fd <- td[exp,]
  fd <- fd[fd$metrics == var,]

  sum.data <- aggregate(value ~ site+scale, data=fd, FUN=fun)
  sum.data <- sum.data[order(sum.data$site),]
  names(sum.data) <- c(names(sum.data)[1:2], var)

  return(sum.data)
}
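
# A purely illustrative, commented-out sketch (toy values, not real package
# output): sum.classes() fills in missing site/scale/class combinations with 0,
# keeps only the classes listed in `vals` for the metric `var`, and aggregates
# their values per site and scale. Here classes 1 and 2 are summed while
# class 3 is left out.
# long <- data.frame(site    = c("s1", "s1", "s1", "s2", "s2", "s2"),
#                    scale   = 1000,
#                    class   = c(1, 2, 3, 1, 2, 3),
#                    metrics = "percent",
#                    value   = c(30, 20, 10, 60, 15, 5))
# sum.classes(long, vals = c(1, 2), var = "percent", fun = "sum")
# #>   site scale percent
# #>   s1   1000      50
# #>   s2   1000      75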

#' @name sum.scales
#' @title Summarize scales
#' @description This function takes the output from extract_metrics or calc_lsm and
#' summarizes a given metric across scales with a user-provided function.
#' Popular functions are "mean", "sum", etc.
#' @param x an output from extract_metrics or calc_lsm
#' @param metric the metric to be summarized
#' @param fun a function to summarize the metric. Popular functions are "sum", "mean".
#' Users can also use custom functions such as: function(x){log(x+1)}
#' @seealso extract_metrics calc_lsm sum.classes
#' @noRd
sum.scales <- function(x, metric, fun="mean"){
  sites   = unique(x$site)
  scales  = unique(x$scale)
  classes = unique(x$class)

  all.comb <- expand.grid("class"= sort(classes), "scale" = scales, "site"=sites)

  td <- merge(x, all.comb, all=TRUE)
  td[is.na(td)] <- 0

  fd <- td[td$metrics == metric,]

  sum.data <- aggregate(value ~ site+scale, data=fd, FUN=fun)
  sum.data <- sum.data[order(sum.data$site),]
  names(sum.data) <- c(names(sum.data)[1:2], metric)

  return(sum.data)
}
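
# A purely illustrative, commented-out sketch (toy values, not real package
# output): sum.scales() keeps the rows of one metric and aggregates its value
# per site and scale with the chosen function.
# long <- data.frame(site    = c("s1", "s1", "s2", "s2"),
#                    scale   = 1000,
#                    class   = c(1, 2, 1, 2),
#                    metrics = "pland",
#                    value   = c(30, 20, 60, 15))
# sum.scales(long, metric = "pland", fun = "sum")
# #>   site scale pland
# #>   s1   1000    50
# #>   s2   1000    75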