#' Prediction of Confidence Rating and Reaction Time Distribution in the
#' multiple-threshold log-normal noise race model
#'
#' \code{predictMTLNR_Conf} predicts the categorical response distribution of
#' decisions and confidence ratings, and \code{predictMTLNR_RT} computes the
#' RT distribution (density) in the multiple-threshold log-normal noise
#' race model (Reynolds et al., 2020), given a specific parameter
#' constellation. See \code{\link{dMTLNR}} for more information about the model
#' and its parameters.
#'
#' @param paramDf a list or data frame with one row. Column names should
#' match the following names (see \link{dMTLNR}):
#' For different levels of stimulus quality (i.e. mean drift rates),
#' names should be `v1`, `v2`, `v3`, ... (the mean accumulation rate of the
#' accumulator corresponding to the stimulus category;
#' `mu_v1` and `mu_v2` are therefore not used in this context but
#' replaced by the parameter `v`). `mu_d1` and `mu_d2` correspond to the mean
#' parameters for the boundary distance of the two accumulators;
#' `s1` and `s2` correspond to the variance parameters of the first and
#' second boundary hitting time;
#' `rho` corresponds to the correlation of the boundary hitting times.
#' Note that `s_v1`, `s_v2`, `rho_v`, `s_d1`, `s_d2`, and `rho_d` are not used in this
#' context, although the accumulation rate-related parameters (`s_v1`, `s_v2`, and
#' `rho_v`) may be given instead of the above-mentioned variance parameters
#' (`s1`, `s2`, and `rho`).
#' Additionally, the confidence thresholds should be given by names with
#' `thetaUpper1`, `thetaUpper2`, ..., `thetaLower1`, ..., or,
#' for symmetric thresholds, only by `theta1`, `theta2`, ... (see Details for the
#' correspondence to the data)
#' @param maxrt numeric. The maximum RT for the
#' integration/density computation. Default: `Inf` (for `predictMTLNR_Conf`
#' (integration)) and 9 (for `predictMTLNR_RT`).
#' @param subdivisions \code{integer} (default: 100).
#' For \code{predictMTLNR_Conf} it is passed as the \code{subdivisions} argument to
#' \code{\link[stats]{integrate}}.
#' For \code{predictMTLNR_RT} it is the number of points at which the density is computed.
#' @param minrt numeric or `NULL` (default). The minimum RT for the density computation.
#' If `NULL`, the value of `t0` in `paramDf` is used.
#' @param scaled logical. For \code{predictMTLNR_RT}. Whether the computed density
#' should be scaled to integrate to one (additional column `densscaled`). Otherwise the output
#' contains only the defective density (i.e. its integral is equal to the probability of a
#' response and not 1). If `TRUE`, the argument `DistConf` should be given, if available.
#' Default: `FALSE`.
#' @param DistConf `NULL` or `data.frame`. A `data.frame` or `matrix` with columns
#' `condition`, `stimulus`, `response`, `rating`, and `p`, giving the distribution of
#' response and rating choices for different conditions and stimulus categories
#' in the form of the output of
#' \code{predictMTLNR_Conf}. It is only necessary if `scaled=TRUE`, because these
#' probabilities are used for scaling. If `scaled=TRUE` and `DistConf=NULL`, it is computed
#' with \code{predictMTLNR_Conf}, which takes additional time, and the function
#' throws a message. Default: `NULL`.
#' @param stop.on.error logical. Argument directly passed on to \code{\link[stats]{integrate}}.
#' Default is `FALSE`, since the densities may lead to slowly converging integrals
#' (which are nevertheless quite accurate), which would otherwise cause R to throw an error.
#' @param .progress logical. If TRUE (default) a progress bar is drawn to the console.
#'
#' @return \code{predictMTLNR_Conf} returns a `data.frame`/`tibble` with columns: `condition`, `stimulus`,
#' `response`, `correct`, `rating`, `p`, `info`, `err`. `p` is the predicted probability of a response
#' and `rating`, given the stimulus category and condition. `info` and `err` refer to the
#' respective outputs of the integration routine used for the computation.
#' \code{predictMTLNR_RT} returns a `data.frame`/`tibble` with columns: `condition`, `stimulus`,
#' `response`, `correct`, `rating`, `rt` and `dens` (and `densscaled`, if `scaled=TRUE`).
#'
#'
#' @details The function \code{predictMTLNR_Conf} consists merely of an integration of
#' the response time density, \code{\link{dMTLNR}}, over the
#' response time in a reasonable interval (0 to `maxrt`). The function
#' \code{predictMTLNR_RT} wraps \code{\link{dMTLNR}} to take a parameter set as input
#' and return the densities as a `data.frame` output. The last snippet in the examples
#' below illustrates this correspondence for a single response and rating cell.
#' For the argument \code{paramDf}, the output of the fitting function \code{\link{fitRTConf}}
#' with the respective model may be used.
#'
#' The names of the accumulation rate parameters differ from those used in
#' \code{\link{dMTLNR}} because the accumulation rates for the two options depend
#' on the stimulus and condition. Here, the mean parameter for the accumulation rate
#' of the accumulator corresponding to the correct response is `v` (`v1`, `v2`, ...,
#' per condition) in `paramDf`, while the other accumulator has a mean rate of 0.
#'
#' @note Different parameters for different conditions are only allowed for drift rate,
#' \code{v}. All other parameters are used for all
#' conditions.
#'
#' @references Reynolds, A., Kvam, P. D., Osth, A. F., & Heathcote, A. (2020). Correlated racing evidence accumulator models. \emph{Journal of Mathematical Psychology, 96}, 102331. doi: 10.1016/j.jmp.2020.102331
#'
#' @author Sebastian Hellmann.
#'
#' @name predictMTLNR
#' @importFrom stats integrate
#' @importFrom progress progress_bar
# @importFrom pracma integral
#'
# @examples
#' @examples
#' # 1. Define some parameter set in a data.frame
#' paramDf <- data.frame(v1=0.5, v2=1.0, t0=0.1, st0=0,
#'                       mu_d1=1, mu_d2=1,
#'                       s1=0.5, s2=0.5, rho=0.2,
#'                       theta1=0.8, theta2=1.5)
#'
#' # 2. Predict discrete Choice x Confidence distribution:
#' preds_Conf <- predictMTLNR_Conf(paramDf, maxrt=7, subdivisions=50)
#' head(preds_Conf)
#'
#' # 3. Compute RT density
#' preds_RT <- predictMTLNR_RT(paramDf, maxrt=7, subdivisions=50)
#' # same output with scaled density column:
#' preds_RT <- predictMTLNR_RT(paramDf, maxrt=7, subdivisions=50,
#'                             scaled=TRUE, DistConf = preds_Conf)
#' head(preds_RT)
#' \donttest{
#' # throws a message, if scaled=TRUE and DistConf is not given
#' preds_RT <- predictMTLNR_RT(paramDf, maxrt=7, subdivisions=50,
#'                             scaled=TRUE)
#' }
#'
#' \donttest{
#' # Example of visualization
#' library(ggplot2)
#' preds_Conf$rating <- factor(preds_Conf$rating, labels=c("unsure", "medium", "sure"))
#' preds_RT$rating <- factor(preds_RT$rating, labels=c("unsure", "medium", "sure"))
#' ggplot(preds_Conf, aes(x=interaction(rating, response), y=p))+
#'   geom_bar(stat="identity")+
#'   facet_grid(cols=vars(stimulus), rows=vars(condition), labeller = "label_both")
#' ggplot(preds_RT, aes(x=rt, color=interaction(rating, response), y=dens))+
#'   geom_line(stat="identity")+
#'   facet_grid(cols=vars(stimulus), rows=vars(condition), labeller = "label_both")+
#'   theme(legend.position = "bottom")
#' ggplot(aggregate(densscaled~rt+correct+rating+condition, preds_RT, mean),
#'        aes(x=rt, color=rating, y=densscaled))+
#'   geom_line(stat="identity")+
#'   facet_grid(cols=vars(condition), rows=vars(correct), labeller = "label_both")+
#'   theme(legend.position = "bottom")
#' }
#' \donttest{
#' # Use PDFtoQuantiles to get predicted RT quantiles
#' # (may produce a warning because of the small number of rt steps, which makes the calculation less accurate)
#' PDFtoQuantiles(preds_RT, scaled = FALSE)
#' }
#'
#' # Example with asymmetric confidence thresholds
#' paramDf_asym <- data.frame(v1=0.5, v2=1.0, t0=0.1, st0=0,
#'                            mu_d1=1, mu_d2=1,
#'                            s1=0.5, s2=0.5, rho=0.2,
#'                            thetaLower1=0.5, thetaLower2=1.2,
#'                            thetaUpper1=0.7, thetaUpper2=1.8)
#'
#' preds_Conf_asym <- predictMTLNR_Conf(paramDf_asym, maxrt=7, subdivisions=50)
#' head(preds_Conf_asym)
#'
#' # Example with multiple conditions
#' paramDf_multi <- data.frame(v1=0.3, v2=0.6, v3=1.2, t0=0.1, st0=0,
#'                             mu_d1=1, mu_d2=1,
#'                             s1=0.5, s2=0.5, rho=0.2,
#'                             theta1=0.8, theta2=1.5)
#'
#' preds_Conf_multi <- predictMTLNR_Conf(paramDf_multi, maxrt=7, subdivisions=50)
#' head(preds_Conf_multi)
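#'
#' # As a rough sketch of what predictMTLNR_Conf computes internally, the
#' # predicted probability for a single cell (here: condition 1, stimulus 1,
#' # response 1, lowest rating) is (approximately) the integral of dMTLNR over
#' # rt, with thresholds 0 and theta1 and mean rates v1 (correct accumulator)
#' # and 0 (other accumulator):
#' p_cell <- integrate(function(rt) dMTLNR(rt, response=1,
#'                                         th1=0, th2=paramDf$theta1,
#'                                         mu_v1=paramDf$v1, mu_v2=0,
#'                                         s_v1=paramDf$s1, s_v2=paramDf$s2,
#'                                         rho_v=paramDf$rho,
#'                                         mu_d1=paramDf$mu_d1, mu_d2=paramDf$mu_d2,
#'                                         s_d1=0, s_d2=0, rho_d=0,
#'                                         t0=0, st0=0),
#'                     lower=0, upper=7)$value
#' p_cell
#' # compare with the corresponding row of the prediction above:
#' subset(preds_Conf, condition==1 & stimulus==1 & response==1 & rating==1)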
#'
#' @rdname predictMTLNR
#' @export
predictMTLNR_Conf <- function(paramDf,
                              maxrt=Inf, subdivisions = 100L, stop.on.error=FALSE,
                              .progress=TRUE){
  nConds <- length(grep(pattern = "^v[0-9]", names(paramDf), value = TRUE))
  symmetric_confidence_thresholds <- length(grep(pattern = "thetaUpper", names(paramDf), value = TRUE)) < 1
  if (symmetric_confidence_thresholds) {
    nRatings <- length(grep(pattern = "^theta[0-9]", names(paramDf))) + 1
  } else {
    nRatings <- length(grep(pattern = "^thetaUpper[0-9]", names(paramDf))) + 1
  }
  if (nConds > 0) {
    V <- c(t(paramDf[, paste("v", 1:(nConds), sep = "")]))
  } else {
    V <- paramDf$v
    nConds <- 1
  }
  ## Recover confidence thresholds
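  # Pad the thresholds with 0 and a very large value, so that rating i is
  # bounded by thetas[i] and thetas[i+1] (the highest rating is effectively
  # unbounded above)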
  if (symmetric_confidence_thresholds) {
    if (nRatings == 1) {
      thetas_1 <- c(0, 1e+64)
      thetas_2 <- c(0, 1e+64)
    } else {
      thetas_1 <- c(0, t(paramDf[, paste("theta", 1:(nRatings-1), sep = "")]), 1e+64)
      thetas_2 <- c(0, t(paramDf[, paste("theta", 1:(nRatings-1), sep = "")]), 1e+64)
    }
  } else {
    thetas_1 <- c(0, t(paramDf[, paste("thetaLower", 1:(nRatings-1), sep = "")]), 1e+64)
    thetas_2 <- c(0, t(paramDf[, paste("thetaUpper", 1:(nRatings-1), sep = "")]), 1e+64)
  }
  if ("s_v1" %in% names(paramDf)) paramDf$s1 <- paramDf$s_v1
  if ("s_v2" %in% names(paramDf)) paramDf$s2 <- paramDf$s_v2
  if ("rho_v" %in% names(paramDf)) paramDf$rho <- paramDf$rho_v
  # Because we integrate over the response time, st0 does not matter.
  # So, to speed up computations for high values of st0, we set it to 0
  # but add the constant to maxrt.
  maxrt <- maxrt + paramDf$st0
  res <- expand.grid(condition = 1:nConds, stimulus = c(1, 2),
                     response = c(1, 2), rating = 1:nRatings,
                     p = NA, info = NA, err = NA)
  if (.progress) {
    pb <- progress_bar$new(total = nConds*nRatings*4)
  }
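  # Integrate the defective density dMTLNR over rt for every combination of
  # condition, stimulus, response, and confidence rating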
  for (i in 1:nrow(res)) {
    row <- res[i, ]
    th1 <- ifelse(row$response == 1, thetas_1[(row$rating)], thetas_2[(row$rating)])
    th2 <- ifelse(row$response == 1, thetas_1[(row$rating+1)], thetas_2[(row$rating+1)])
    mu1 <- V[row$condition]*as.numeric(row$stimulus == 1)
    mu2 <- V[row$condition]*as.numeric(row$stimulus == 2)
    p <- integrate(function(rt) dMTLNR(rt, response = row$response,
                                       th1 = th1, th2 = th2,
                                       mu_v1 = mu1, mu_v2 = mu2,
                                       s_v1 = paramDf$s1, s_v2 = paramDf$s2,
                                       rho_v = paramDf$rho,
                                       mu_d1 = paramDf$mu_d1, mu_d2 = paramDf$mu_d2,
                                       s_d1 = 0, s_d2 = 0, rho_d = 0,
                                       # because we integrate over rt, setting t0=0 and st0=0
                                       # does not change the result (st0 is added to maxrt above)
                                       # but speeds up the integration considerably
                                       t0 = 0, st0 = 0),
                   lower = 0, upper = maxrt, subdivisions = subdivisions,
                   stop.on.error = stop.on.error)
    if (.progress) pb$tick()
    res[i, 5:7] <- list(p = p$value, info = p$message, err = p$abs.error)
  }
  res$correct <- as.numeric(res$stimulus == res$response)
  res <- res[c("condition", "stimulus", "response", "correct", "rating", "p", "info", "err")]
  # the last line sorts the output columns
  # (to combine outputs from predictWEV_Conf and predictDDConf_Conf)
  res
}
### Predict RT-distribution
#' @rdname predictMTLNR
#' @export
predictMTLNR_RT <- function(paramDf,
                            maxrt=9, subdivisions = 100L, minrt=NULL,
                            scaled = FALSE, DistConf=NULL,
                            .progress = TRUE) {
  if (scaled && is.null(DistConf)) {
    message(paste("scaled is TRUE and DistConf is NULL. The rating distribution will",
                  " be computed, which will take additional time.", sep = ""))
  }
  nConds <- length(grep(pattern = "^v[0-9]", names(paramDf), value = TRUE))
  symmetric_confidence_thresholds <- length(grep(pattern = "thetaUpper", names(paramDf), value = TRUE)) < 1
  if (symmetric_confidence_thresholds) {
    nRatings <- length(grep(pattern = "^theta[0-9]", names(paramDf))) + 1
  } else {
    nRatings <- length(grep(pattern = "^thetaUpper[0-9]", names(paramDf))) + 1
  }
  if (nConds > 0) {
    V <- c(t(paramDf[, paste("v", 1:(nConds), sep = "")]))
  } else {
    V <- paramDf$v
    nConds <- 1
  }
  ## Recover confidence thresholds
  if (symmetric_confidence_thresholds) {
    if (nRatings == 1) {
      thetas_1 <- c(0, 1e+32)
      thetas_2 <- c(0, 1e+32)
    } else {
      thetas_1 <- c(0, t(paramDf[, paste("theta", 1:(nRatings-1), sep = "")]), 1e+64)
      thetas_2 <- c(0, t(paramDf[, paste("theta", 1:(nRatings-1), sep = "")]), 1e+64)
    }
  } else {
    thetas_1 <- c(0, t(paramDf[, paste("thetaLower", 1:(nRatings-1), sep = "")]), 1e+64)
    thetas_2 <- c(0, t(paramDf[, paste("thetaUpper", 1:(nRatings-1), sep = "")]), 1e+64)
  }
  if ("s_v1" %in% names(paramDf)) paramDf$s1 <- paramDf$s_v1
  if ("s_v2" %in% names(paramDf)) paramDf$s2 <- paramDf$s_v2
  if ("rho_v" %in% names(paramDf)) paramDf$rho <- paramDf$rho_v
  if (is.null(minrt)) minrt <- paramDf$t0
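  # Equally spaced rt grid (from minrt to maxrt) at which the densities are evaluated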
  rt <- seq(minrt, maxrt, length.out = subdivisions)
  df <- expand.grid(rt = rt,
                    rating = 1:nRatings,
                    response = c(1, 2),
                    stimulus = c(1, 2),
                    condition = 1:nConds, dens = NA)
  if (scaled) {
    ## Scale RT-density to integrate to 1 (for plotting together with simulations).
    # Therefore, divide the density by the probability of a
    # decision-rating-response (as in the data.frame DistConf).
    if (is.null(DistConf)) {
      DistConf <- predictMTLNR_Conf(paramDf,
                                    maxrt = maxrt, subdivisions = subdivisions,
                                    .progress = FALSE)
    }
    DistConf <- DistConf[, c("rating", "response", "stimulus", "condition", "p")]
    df$densscaled <- NA
  }
  if (.progress) {
    pb <- progress_bar$new(total = nConds*nRatings*4)
  }
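  # Each iteration fills one block of `subdivisions` rows of df, i.e. one
  # condition x stimulus x response x rating cell (rt varies fastest in df)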
  for (i in 1:(nRatings*2*2*nConds)) {
    cur_row <- df[1 + ((i-1)*subdivisions), ]
    th1 <- ifelse(cur_row$response == 1, thetas_1[(cur_row$rating)], thetas_2[(cur_row$rating)])
    th2 <- ifelse(cur_row$response == 1, thetas_1[(cur_row$rating+1)], thetas_2[(cur_row$rating+1)])
    mu1 <- V[cur_row$condition]*as.numeric(cur_row$stimulus == 1)
    mu2 <- V[cur_row$condition]*as.numeric(cur_row$stimulus == 2)
    df[(1:subdivisions) + subdivisions*(i-1), "dens"] <-
      dMTLNR(rt, cur_row$response,
             th1 = th1, th2 = th2,
             mu_v1 = mu1, mu_v2 = mu2,
             s_v1 = paramDf$s1, s_v2 = paramDf$s2,
             rho_v = paramDf$rho,
             mu_d1 = paramDf$mu_d1, mu_d2 = paramDf$mu_d2,
             s_d1 = 0, s_d2 = 0, rho_d = 0,
             t0 = paramDf$t0, st0 = paramDf$st0)
    if (scaled) {
      P <- DistConf[DistConf$condition == cur_row$condition &
                      DistConf$response == cur_row$response &
                      DistConf$rating == cur_row$rating &
                      DistConf$stimulus == cur_row$stimulus, ]$p
      if (P != 0) {
        df[(1:subdivisions) + subdivisions*(i-1), "densscaled"] <-
          df[(1:subdivisions) + subdivisions*(i-1), "dens"]/P
      } else {
        df[(1:subdivisions) + subdivisions*(i-1), "densscaled"] <- 0
      }
    }
    if (.progress) pb$tick()
  }
  df$correct <- as.numeric(df$stimulus == df$response)
  df <- df[, c("condition", "stimulus", "response", "correct", "rating",
               "rt", "dens", rep("densscaled", as.numeric(scaled)))]
  # the last line sorts the output columns
  # (to combine outputs from predictWEV_RT and predictDDConf_RT)
  return(df)
}