R/ecospat.grid.clim.dyn_custom.R

Defines functions ecospat.grid.clim.dyn_custom

#' @import ecospat
#' @import sp
#' @import raster
#' @importFrom stats density quantile
NULL
#' Custom version of ecospat.grid.clim.dyn with optional NA removal
#'
#' A copy of \code{ecospat::ecospat.grid.clim.dyn()} that adds a
#' \code{removeNA} argument, so NA values can be dropped when computing the
#' environmental density quantile threshold.
#'
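#' A minimal usage sketch; \code{pca.scores.glob}, \code{pca.scores.glob1} and
#' \code{pca.scores.sp} are hypothetical two-column matrices of PCA scores for
#' the full background, the study-area background, and the occurrences:
#'
#' @param removeNA logical; passed as \code{na.rm} to \code{quantile()} when
#'   thresholding the environmental density in the two-dimensional case.
#'
#' @examples
#' \dontrun{
#' grid.sp <- ecospat.grid.clim.dyn_custom(glob = pca.scores.glob,
#'                                         glob1 = pca.scores.glob1,
#'                                         sp = pca.scores.sp,
#'                                         R = 100, removeNA = TRUE)
#' }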
#' @export
ecospat.grid.clim.dyn_custom <- function(glob, glob1, sp, R, th.sp = 0, th.env = 0,
                                         geomask = NULL, removeNA = TRUE) {
  
  glob <- as.matrix(glob)
  glob1 <- as.matrix(glob1)
  sp <- as.matrix(sp)
  l <- list()
  
  if (ncol(glob) > 2)
    stop("cannot calculate overlap with more than two axes")
  
  if (ncol(glob) == 1) {
    # if scores in one dimension (e.g. LDA,SDM predictions,...)
    xmax <- max(glob[, 1])
    xmin <- min(glob[, 1])
    x <- seq(from = min(glob[, 1]), to = max(glob[, 1]), length.out = R) # breaks on score gradient 1
    sp.dens <- density(sp[, 1], kernel = "gaussian", from = xmin, to = xmax,
                       n = R, cut = 0) # calculate the density of occurrences in a vector of R pixels along the score gradient
    # using a gaussian kernel density function, with R bins.
    glob1.dens <- density(glob1[, 1], kernel = "gaussian", from = xmin,
                          to = xmax, n = R, cut = 0) # calculate the density of environments in glob1
    z <- sp.dens$y * nrow(sp)/sum(sp.dens$y) # rescale density to the number of occurrences in sp (occurrences per pixel)
    Z <- glob1.dens$y * nrow(glob1)/sum(glob1.dens$y) # rescale density to the number of sites in glob1
    glob1r <- sapply(glob1, findInterval, glob1.dens$x)
    th.env <- quantile(glob1.dens$y[glob1r], th.env)
    glob1rm <- which(Z < th.env)
    spr <- sapply(sp, findInterval, sp.dens$x)
    th.sp <- quantile(sp.dens$y[spr], th.sp)
    sprm <- which(z < th.sp)
    z[sprm] <- 0 # remove infinitesimally small number generated by kernel density function
    Z[glob1rm] <- 0 # remove infinitesimally small number generated by kernel density function
    
    z.uncor <- z/max(z) # rescale between [0:1] for comparison with other species
    z.cor <- z/Z # correct for environment prevalence
    z.cor[is.na(z.cor)] <- 0 # remove 0/0 situations (NaN)
    z.cor[is.infinite(z.cor)] <- 0 # remove n/0 situations (Inf)
    z.cor <- z.cor/max(z.cor) # rescale between [0:1] for comparison with other species
    w <- z.uncor
    w[w > 0] <- 1
    l$x <- x
    l$z <- z
    l$z.uncor <- z.uncor
    l$z.cor <- z.cor
    l$Z <- Z
    l$glob <- glob
    l$glob1 <- glob1
    l$sp <- sp
    l$w <- w
  }
  
  if (ncol(glob) == 2) {
    # if scores in two dimensions (e.g. PCA)
    
    xmin <- min(glob[, 1])
    xmax <- max(glob[, 1])
    ymin <- min(glob[, 2])
    ymax <- max(glob[, 2]) # data preparation
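    # Rescale both score axes to [0, 1] so the point coordinates line up with
    # the kernelUD estimation grid (mask) built on the unit square below.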
    glob1r <- data.frame(cbind((glob1[, 1] - xmin)/abs(xmax - xmin),
                               (glob1[, 2] - ymin)/abs(ymax - ymin))) # data preparation
    spr <- data.frame(cbind((sp[, 1] - xmin)/abs(xmax - xmin),
                            (sp[, 2] - ymin)/abs(ymax - ymin))) # data preparation
    mask <- adehabitatMA::ascgen(SpatialPoints(cbind((0:R)/R, (0:R)/R)),
                                 nrcol = R - 2, count = FALSE) # data preparation
    sp.dens <- adehabitatHR::kernelUD(SpatialPoints(spr[, 1:2]), h = "href", grid = mask,
                                      kern = "bivnorm") # calculate the density of occurrences in a grid of RxR pixels along the score gradients
    sp.dens <- raster(xmn = xmin, xmx = xmax, ymn = ymin, ymx = ymax,
                      matrix(sp.dens$ud, nrow = R))
    # using a gaussian kernel density function, with RxR bins.
    # sp.dens$var[sp.dens$var>0 & sp.dens$var<1]<-0
    glob1.dens <- adehabitatHR::kernelUD(SpatialPoints(glob1r[, 1:2]), grid = mask, kern = "bivnorm")
    glob1.dens <- raster(xmn = xmin, xmx = xmax, ymn = ymin, ymx = ymax,
                         matrix(glob1.dens$ud, nrow = R))
    # glob1.dens$var[glob1.dens$var<1 & glob1.dens$var>0]<-0
    
    x <- seq(from = min(glob[, 1]), to = max(glob[, 1]), length.out = R) # breaks on score gradient 1
    y <- seq(from = min(glob[, 2]), to = max(glob[, 2]), length.out = R) # breaks on score gradient 2
    glob1r <- extract(glob1.dens, glob1)
    Z.th <- quantile(glob1r, th.env, na.rm = removeNA) # removeNA drops NA extractions before taking the quantile
    glob1.dens[glob1.dens < Z.th] <- 0
    if (!is.null(geomask)) {
      proj4string(geomask) <- NA
      glob1.dens <- mask(glob1.dens, geomask, updatevalue = 0) # geographical mask, in case the analysis takes place in geographical space
    }
    Z <- glob1.dens * nrow(glob1)/cellStats(glob1.dens, "sum")
    
    spr <- extract(sp.dens, sp)
    z.th <- quantile(spr, th.sp)
    sp.dens[Z == 0] <- 0
    sp.dens[sp.dens < z.th] <- 0
    if (!is.null(geomask)) {
      sp.dens <- mask(sp.dens, geomask, updatevalue = 0) # geographical mask, in case the analysis takes place in geographical space
    }
    z <- sp.dens * nrow(sp)/cellStats(sp.dens, "sum")
    z.uncor <- z/cellStats(z, "max")
    w <- z.uncor
    w[w > 0] <- 1 # binary mask of occupied environmental space
    z.cor <- z/Z # correct for environment prevalence
    z.cor[is.na(z.cor)] <- 0 # remove n/0 situations
    z.cor <- z.cor/cellStats(z.cor, "max")
    l$x <- x
    l$y <- y
    l$z <- z
    l$z.uncor <- z.uncor
    l$z.cor <- z.cor
    l$Z <- Z
    l$glob <- glob
    l$glob1 <- glob1
    l$sp <- sp
    l$w <- w
    
  }
  
  return(l)
}
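
# A minimal usage sketch for downstream overlap analysis: `grid.sp1` and
# `grid.sp2` are hypothetical outputs of ecospat.grid.clim.dyn_custom() for two
# taxa computed on the same score axes.
#
# overlap <- ecospat::ecospat.niche.overlap(grid.sp1, grid.sp2, cor = TRUE)
# overlap$D  # Schoener's D
# overlap$I  # Warren's I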