#' Compute dissimilarity between two wavelet spectra
#'
#' @author Tarik C. Gouhier (tarik.gouhier@@gmail.com)
#'
#' @param wt1 \code{power}, \code{wave} or \code{rsq} matrix from
#' \code{biwavelet} object generated by \code{\link{wt}}, \code{\link{xwt}},
#' or \code{\link{wtc}}.
#' @param wt2 \code{power}, \code{wave} or \code{rsq} matrix from
#' \code{biwavelet} object generated by \code{\link{wt}}, \code{\link{xwt}},
#' or \code{\link{wtc}}.
#' @param cutoff Cutoff value used to compute dissimilarity. Only the leading
#' orthogonal axes that together account for at least \code{cutoff} of the
#' total covariance between the two wavelet spectra are used to compute their
#' dissimilarity (a minimum of three axes is always retained; see the examples
#' for the effect of lowering the cutoff).
#'
#' @return Returns the dissimilarity between the two wavelet spectra as a
#' single numeric value.
#'
#' @references
#' Rouyer, T., J. M. Fromentin, F. Menard, B. Cazelles, K. Briand, R. Pianet,
#' B. Planque, and N. C. Stenseth. 2008. Complex interplays among population
#' dynamics, environmental forcing, and exploitation in fisheries.
#' \emph{Proceedings of the National Academy of Sciences} 105:5420-5425.
#'
#' Rouyer, T., J. M. Fromentin, N. C. Stenseth, and B. Cazelles. 2008.
#' Analysing multiple time series and extending significance testing in
#' wavelet analysis. \emph{Marine Ecology Progress Series} 359:11-23.
#'
#' @examples
#' t1 <- cbind(1:100, sin(seq(0, 10 * 2 * pi, length.out = 100)))
#' t2 <- cbind(1:100, sin(seq(0, 10 * 2 * pi, length.out = 100) + 0.1 * pi))
#'
#' # Compute wavelet spectra
#' wt.t1 <- wt(t1)
#' wt.t2 <- wt(t2)
#'
#' # Compute dissimilarity
#' wdist(wt.t1$wave, wt.t2$wave)
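#'
#' # A lower cutoff retains fewer leading axes (never fewer than three) and
#' # can be used to check how sensitive the dissimilarity is to this choice:
#' wdist(wt.t1$wave, wt.t2$wave, cutoff = 0.90)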
#'
#' @export
wdist <- function(wt1, wt2, cutoff = 0.99) {
  # Covariance between the real parts of the two wavelet spectra
  wcov <- Re(wt1) %*% t(Re(wt2))
  wsvd <- svd(wcov)

  # Cutoff point: keep the leading axes whose cumulative contribution first
  # reaches the cutoff, but always retain at least 3 axes
  nfreqs <- max(3, which(cumsum(sqrt(wsvd$d)) / sum(sqrt(wsvd$d)) >= cutoff)[1])

  # Singular vectors restricted to the retained axes
  u <- wsvd$u[seq_len(nfreqs), ]
  v <- wsvd$v[seq_len(nfreqs), ]

  # Leading patterns: project each spectrum onto the retained axes
  Lnk <- t(u[, seq_len(nfreqs)]) %*% Re(wt1[seq_len(nfreqs), ])
  Ljk <- t(v[, seq_len(nfreqs)]) %*% Re(wt2[seq_len(nfreqs), ])

  # Distance between the leading patterns
  D1 <- rowSums(atan(abs(
    (Lnk[, seq_len(NCOL(Lnk) - 1)] - Ljk[, seq_len(NCOL(Ljk) - 1)]) -
    (Lnk[, 2:NCOL(Lnk)] - Ljk[, 2:NCOL(Ljk)]))))

  # Distance between the leading axes (singular vectors)
  D2 <- rowSums(atan(abs(
    (u[, seq_len(NROW(u) - 1)] - v[, seq_len(NROW(v) - 1)]) -
    (u[, 2:NROW(u)] - v[, 2:NROW(v)]))))

  # Weights based on the amount of variance explained by each axis
  w <- sqrt(wsvd$d[seq_len(nfreqs)]) / sum(sqrt(wsvd$d[seq_len(nfreqs)]))

  # Overall dissimilarity: weighted mean of the combined distances
  weighted.mean(D1 + D2, w)
}
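
# Standalone sketch (not part of the package): illustrates how the cutoff
# selects the number of leading axes, mirroring the selection rule used in
# wdist() above. The singular values `d` below are hypothetical.
d <- c(12, 6, 3, 1.5, 0.5)
cum_share <- cumsum(sqrt(d)) / sum(sqrt(d))
max(3, which(cum_share >= 0.99)[1])  # number of axes retained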