require(testthat)
require(methods)
require(nimble)
## These make it clear that error messages are expected.
if(FALSE) {
expect_failure <- function(...) {
cat('BEGIN expected failure:\n', file = stderr())
testthat:::expect_failure(...)
argList <- list(...)
cat(argList$info)
cat('\nEND expected failure.\n', file = stderr())
}
}
## Mark tests that are known to fail with `if(RUN_FAILING_TESTS)`.
## By default these tests will not be run, but we will occasionally clean up by running them with
## $ RUN_FAILING_TESTS=1 Rscript test-my-stuff.R
## At the moment this is only used in test-optim.R; otherwise we have
## mechanisms in various test files for wrapping failing tests
## in expect_failure.
RUN_FAILING_TESTS <- (nchar(Sys.getenv('RUN_FAILING_TESTS')) != 0)
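## A minimal usage sketch (hypothetical test), disabled with if(FALSE) so
## sourcing this file has no side effects:
if(FALSE) {
if(RUN_FAILING_TESTS) {
test_that('known-failing case (hypothetical)', {
expect_equal(1 + 1, 2)
})
}
}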
## We can get problems on Windows with system(cmd) if cmd contains
## paths with spaces in directory names. The solution is to build a cmd
## string with only local names (to avoid spaces) and to indicate the
## directory it should be run in; setwd(dir) handles directory paths
## with spaces fine.
system.in.dir <- function(cmd, dir = '.') {
curDir <- getwd()
on.exit(setwd(curDir), add = TRUE)
setwd(dir)
if(.Platform$OS.type == "windows")
shell(shQuote(cmd))
else
system(cmd)
}
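## Usage sketch (hypothetical paths): run a command that refers only to
## local file names, inside a directory whose path may contain spaces.
if(FALSE) {
system.in.dir('Rscript localScript.R', dir = 'C:/Users/Some Name/project')
}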
## This sets up sink to also capture messages (in particular warnings).
sink_with_messages <- function(file, ...) {
sinkfile <- file(file, open = 'wt')
sink(sinkfile)
sink(sinkfile, type = 'message')
}
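## Usage sketch (hypothetical file name); both the output and message
## streams must be unsunk when done:
if(FALSE) {
sink_with_messages('capture.txt')
message('captured along with regular output')
sink(type = 'message') ## end the message diversion
sink()                 ## end the output diversion
}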
## This is useful for working around scoping issues with nimbleFunctions using other nimbleFunctions.
temporarilyAssignInGlobalEnv <- function(value, replace = FALSE) {
name <- deparse(substitute(value))
assign(name, value, envir = .GlobalEnv)
if(!replace) {
rmCommand <- substitute(remove(name, envir = .GlobalEnv))
do.call('on.exit', list(rmCommand, add = TRUE), envir = parent.frame())
}
}
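## Usage sketch: make a helper nimbleFunction visible where compilation will
## look for it (the names here are hypothetical). The assignment is undone
## when the calling frame exits unless replace = TRUE.
if(FALSE) {
helperNf <- nimbleFunction(run = function(x = double(0)) {
returnType(double(0))
return(x + 1)
})
temporarilyAssignInGlobalEnv(helperNf)
}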
withTempProject <- function(code) {
code <- substitute(code)
project <- nimble:::nimbleProjectClass()
nimbleOptions(nimbleProjectForTesting = project)
on.exit({
## messages are suppressed by test_that, so "assign('.check', 1, globalenv())" can be used as a way to verify this code was called
.project <- nimbleOptions('nimbleProjectForTesting')
nimbleOptions(nimbleProjectForTesting = NULL) ## clear this before clearCompiled step, in case clearCompiled() itself fails
.project$clearCompiled()
}, add = TRUE)
eval(code)
}
expect_compiles <- function(..., info = NULL, link = FALSE, forceO1 = TRUE) {
oldSCBL <- nimbleOptions('stopCompilationBeforeLinking')
nimbleOptions(stopCompilationBeforeLinking = !link)
oldForceO1 <- nimbleOptions('forceO1')
nimbleOptions(forceO1 = forceO1)
on.exit({
assign('.check', 1, globalenv())
nimbleOptions(stopCompilationBeforeLinking = oldSCBL)
nimbleOptions(forceO1 = oldForceO1)
}, add = TRUE)
if(!link) {
ans <- try(compileNimble(...)) ## expecting a thrown error
expect_identical(as.character(ans), 'Error : safely stopping before linking\n', info = info)
} else {
ans <- compileNimble(...)
ans
}
}
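## Usage sketch (hypothetical nimbleFunction nfR): with the default
## link = FALSE this asserts that C++ generation succeeds and compilation
## stops safely before linking.
if(FALSE) {
expect_compiles(nfR, info = 'nfR generates C++')
}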
gen_runFunCore <- function(input) {
runFun <- function() {}
formalsList <- input$args
if(is.null(formalsList)) formalsList <- list()
if(is.null(names(formalsList)))
if(length(formalsList) > 0)
names(formalsList) <- paste0('arg', seq_along(input$args))
formals(runFun) <- formalsList
tmp <- quote({})
tmp[[2]] <- input$expr
tmp[[3]] <- if(is.null(input[['return']]))
quote(return(out))
else
input[['return']]
tmp[[4]] <- substitute(returnType(OUT), list(OUT = input$outputType))
body(runFun) <- tmp
return(runFun)
}
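## Example input (hypothetical) and the run function it generates:
if(FALSE) {
input <- list(name = 'exp of vector',
args = list(arg1 = quote(double(1))),
expr = quote(out <- exp(arg1)),
outputType = quote(double(1)))
gen_runFunCore(input)
## function(arg1 = double(1)) { out <- exp(arg1); return(out); returnType(double(1)) }
}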
## Indexes the names of a list of input lists for test_coreRfeature
indexNames <- function(x) {
i <- 1
lapply(x, function(z) {z$name <- paste(i, z$name); i <<- i + 1; z})
}
test_coreRfeature_batch <- function(input_batch, info = '', verbose = nimbleOptions('verbose'), dirName = NULL) {
test_coreRfeature_batch_internal(input_batch, verbose = verbose, dirName = dirName)
}
test_coreRfeature_batch_internal <- function(input_batch, verbose = nimbleOptions('verbose'), dirName = NULL) { ## a lot like test_math but a bit more flexible
names(input_batch) <- paste0('batch_case_', seq_along(input_batch))
runFuns <- lapply(input_batch, gen_runFunCore)
nfR <- nimbleFunction(setup = TRUE, methods = runFuns)()
nfC <- compileNimble(nfR, dirName = dirName)
for(i in seq_along(input_batch)) {
input <- input_batch[[i]]
if(verbose) cat("### Testing", input$name, "###\n")
test_that(input$name, {
funName <- names(input_batch)[i]
if(is.null(input$expectWarnings))
input$expectWarnings <- list()
nArgs <- length(input$args)
evalEnv <- new.env()
eval(input$setArgVals, envir = evalEnv)
savedArgs <- as.list(evalEnv)
seedToUse <- if(is.null(input[['seed']])) 31415927 else input[['seed']]
set.seed(seedToUse)
wrap_if_matches("R eval", names(input$expectWarnings), expect_warning, {
eval(input$expr, envir = evalEnv)
})
savedOutputs <- as.list(evalEnv)
list2env(savedArgs, envir = evalEnv)
## with R ref classes, lookup of methods via `[[` does not work until it has been done via `$`
## force it via `$` here to allow simpler syntax below
forceFind <- eval(substitute(nfR$FUNNAME, list(FUNNAME = as.name(funName))))
forceFind <- eval(substitute(nfC$FUNNAME, list(FUNNAME = as.name(funName))))
if(nArgs == 5) {
set.seed(seedToUse)
wrap_if_matches("R run", names(input$expectWarnings), expect_warning, {
out_nfR = nfR[[funName]](evalEnv$arg1, evalEnv$arg2, evalEnv$arg3, evalEnv$arg4, evalEnv$arg5)})
list2env(savedArgs, envir = evalEnv)
set.seed(seedToUse)
out_nfC = nfC[[funName]](evalEnv$arg1, evalEnv$arg2, evalEnv$arg3, evalEnv$arg4, evalEnv$arg5)
}
if(nArgs == 4) {
set.seed(seedToUse)
wrap_if_matches("R run", names(input$expectWarnings), expect_warning, {
out_nfR = nfR[[funName]](evalEnv$arg1, evalEnv$arg2, evalEnv$arg3, evalEnv$arg4)
})
list2env(savedArgs, envir = evalEnv)
set.seed(seedToUse)
out_nfC = nfC[[funName]](evalEnv$arg1, evalEnv$arg2, evalEnv$arg3, evalEnv$arg4)
}
if(nArgs == 3) {
set.seed(seedToUse)
wrap_if_matches("R run", names(input$expectWarnings), expect_warning, {
out_nfR = nfR[[funName]](evalEnv$arg1, evalEnv$arg2, evalEnv$arg3)
})
list2env(savedArgs, envir = evalEnv)
set.seed(seedToUse)
out_nfC = nfC[[funName]](evalEnv$arg1, evalEnv$arg2, evalEnv$arg3)
}
if(nArgs == 2) {
set.seed(seedToUse)
wrap_if_matches("R run", names(input$expectWarnings), expect_warning, {
out_nfR = nfR[[funName]](evalEnv$arg1, evalEnv$arg2)
})
list2env(savedArgs, envir = evalEnv)
set.seed(seedToUse)
out_nfC = nfC[[funName]](evalEnv$arg1, evalEnv$arg2)
}
if(nArgs == 1) {
set.seed(seedToUse)
wrap_if_matches("R run", names(input$expectWarnings), expect_warning, {
out_nfR = nfR[[funName]](evalEnv$arg1)
})
list2env(savedArgs, envir = evalEnv)
set.seed(seedToUse)
out_nfC = nfC[[funName]](evalEnv$arg1)
}
if(nArgs == 0) {
set.seed(seedToUse)
wrap_if_matches("R run", names(input$expectWarnings), expect_warning, {
out_nfR = nfR[[funName]]()
})
list2env(savedArgs, envir = evalEnv)
set.seed(seedToUse)
out_nfC = nfC[[funName]]()
}
out <- savedOutputs$out
## clear any attributes except dim
dimOut <- attr(out, 'dim')
dimOutR <- attr(out_nfR, 'dim')
dimOutC <- attr(out_nfC, 'dim')
attributes(out) <- attributes(out_nfR) <- attributes(out_nfC) <- NULL
if(!is.null(input[['storage.mode']]))
storage.mode(out) <- storage.mode(out_nfR) <- storage.mode(out_nfC) <- input[['storage.mode']]
attr(out, 'dim') <- dimOut
attr(out_nfR, 'dim') <- dimOutR
attr(out_nfC, 'dim') <- dimOutC
checkEqual <- input[['checkEqual']]
if(is.null(checkEqual)) checkEqual <- FALSE
if(is.null(input[['return']])) { ## use default 'out' object
if(!checkEqual) {
expect_identical(class(out), class(out_nfC), info = paste('identical test of class', class(out), class(out_nfC)))
expect_identical(dim(out), dim(out_nfC), info = 'identical test of dim')
expect_identical(round(out, 10), round(out_nfC, 10), info = 'identical test of round')
expect_identical(out, out_nfR, info = "Identical test of coreRfeature (direct R vs. R nimbleFunction)")
wh <- which.max(abs(out - out_nfC))
expect_identical(out, out_nfC, info = paste("Identical test of coreRfeature (direct R vs. C++ nimbleFunction)", system('gcc --version', intern = T)[1], ' ', sprintf("%0.20f", out[wh]), " ", sprintf("%0.20f", out_nfC[wh])))
} else {
expect_equal(out, out_nfR, info = "Equal test of coreRfeature (direct R vs. R nimbleFunction)")
expect_equal(out, out_nfC, info = "Equal test of coreRfeature (direct R vs. C++ nimbleFunction)")
}
} else { ## not using default return(out), so only compare out_nfR to out_nfC
if(!checkEqual) {
expect_identical(out_nfC, out_nfR, info = "Identical test of coreRfeature (compiled vs. uncompiled nimbleFunction)")
} else {
expect_equal(out_nfC, out_nfR, info = "Equal test of coreRfeature (compiled vs. uncompiled nimbleFunction)")
}
}
})
}
## unload DLL as R doesn't like to have too many loaded
if(.Platform$OS.type != 'windows') nimble:::clearCompiled(nfR) ##dyn.unload(project$cppProjects[[1]]$getSOName())
invisible(NULL)
}
test_coreRfeature <- function(input, verbose = nimbleOptions('verbose'), dirName = NULL) {
test_that(input$name, {
test_coreRfeature_internal(input, verbose, dirName)
})
}
test_coreRfeature_internal <- function(input, verbose = nimbleOptions('verbose'), dirName = NULL) { ## a lot like test_math but a bit more flexible
if(verbose) cat("### Testing", input$name, "###\n")
runFun <- gen_runFunCore(input)
nfR <- nimbleFunction(run = runFun)
## If a compiler error is expected, the compilation step itself becomes the test.
expectedCompilerError <- FALSE
if(!is.null(input[['expectedCompilerError']]))
if(isTRUE(input[['expectedCompilerError']]))
expectedCompilerError <- TRUE
if(!expectedCompilerError) {
nfC <- compileNimble(nfR, dirName = dirName)
} else {
expect_error(nfC <- compileNimble(nfR, dirName = dirName))
return()
}
nArgs <- length(input$args)
evalEnv <- new.env()
eval(input$setArgVals, envir = evalEnv)
savedArgs <- as.list(evalEnv)
seedToUse <- if(is.null(input[['seed']])) 31415927 else input[['seed']]
set.seed(seedToUse)
eval(input$expr, envir = evalEnv)
savedOutputs <- as.list(evalEnv)
list2env(savedArgs, envir = evalEnv)
if(nArgs == 5) {
set.seed(seedToUse)
out_nfR = nfR(evalEnv$arg1, evalEnv$arg2, evalEnv$arg3, evalEnv$arg4, evalEnv$arg5)
list2env(savedArgs, envir = evalEnv)
set.seed(seedToUse)
out_nfC = nfC(evalEnv$arg1, evalEnv$arg2, evalEnv$arg3, evalEnv$arg4, evalEnv$arg5)
}
if(nArgs == 4) {
set.seed(seedToUse)
out_nfR = nfR(evalEnv$arg1, evalEnv$arg2, evalEnv$arg3, evalEnv$arg4)
list2env(savedArgs, envir = evalEnv)
set.seed(seedToUse)
out_nfC = nfC(evalEnv$arg1, evalEnv$arg2, evalEnv$arg3, evalEnv$arg4)
}
if(nArgs == 3) {
set.seed(seedToUse)
out_nfR = nfR(evalEnv$arg1, evalEnv$arg2, evalEnv$arg3)
list2env(savedArgs, envir = evalEnv)
set.seed(seedToUse)
out_nfC = nfC(evalEnv$arg1, evalEnv$arg2, evalEnv$arg3)
}
if(nArgs == 2) {
set.seed(seedToUse)
out_nfR = nfR(evalEnv$arg1, evalEnv$arg2)
list2env(savedArgs, envir = evalEnv)
set.seed(seedToUse)
out_nfC = nfC(evalEnv$arg1, evalEnv$arg2)
}
if(nArgs == 1) {
set.seed(seedToUse)
out_nfR = nfR(evalEnv$arg1)
list2env(savedArgs, envir = evalEnv)
set.seed(seedToUse)
out_nfC = nfC(evalEnv$arg1)
}
if(nArgs == 0) {
set.seed(seedToUse)
out_nfR = nfR()
list2env(savedArgs, envir = evalEnv)
set.seed(seedToUse)
out_nfC = nfC()
}
out <- savedOutputs$out
## clear any attributes except dim
dimOut <- attr(out, 'dim')
dimOutR <- attr(out_nfR, 'dim')
dimOutC <- attr(out_nfC, 'dim')
attributes(out) <- attributes(out_nfR) <- attributes(out_nfC) <- NULL
attr(out, 'dim') <- dimOut
attr(out_nfR, 'dim') <- dimOutR
attr(out_nfC, 'dim') <- dimOutC
checkEqual <- input[['checkEqual']]
if(is.null(checkEqual)) checkEqual <- FALSE
if(is.null(input[['return']])) { ## use default 'out' object
if(!checkEqual) {
expect_identical(out, out_nfR, info = paste0("FOO Identical test of coreRfeature (direct R vs. R nimbleFunction): ", input$name))
expect_identical(out, out_nfC, info = paste0("FOO Identical test of coreRfeature (direct R vs. C++ nimbleFunction): ", input$name))
} else {
expect_equal(out, out_nfR, info = paste0("Equal test of coreRfeature (direct R vs. R nimbleFunction): ", input$name) )
expect_equal(out, out_nfC, info = paste0("Equal test of coreRfeature (direct R vs. C++ nimbleFunction): ", input$name))
}
} else { ## not using default return(out), so only compare out_nfR to out_nfC
if(!checkEqual) {
expect_identical(out_nfC, out_nfR, info = paste0("Identical test of coreRfeature (compiled vs. uncompiled nimbleFunction): ", input$name))
} else {
expect_equal(out_nfC, out_nfR, info = paste0("Equal test of coreRfeature (compiled vs. uncompiled nimbleFunction): ", input$name))
}
}
# unload DLL as R doesn't like to have too many loaded
if(.Platform$OS.type != 'windows') nimble:::clearCompiled(nfR) ##dyn.unload(project$cppProjects[[1]]$getSOName())
invisible(NULL)
}
gen_runFun <- function(param, logicalArgs, returnType = "double") {
runFun <- function() {}
types <- rep('double', length(param$inputDim))
types[logicalArgs] <- 'logical'
types <- paste0(types, '(', param$inputDim, ')')
formalsList <- lapply(types, function(x) parse(text = x)[[1]])
names(formalsList) <- paste0('arg', seq_along(param$inputDim))
formals(runFun) <- formalsList
tmp <- quote({})
tmp[[2]] <- param$expr
tmp[[3]] <- quote(return(out))
tmp[[4]] <- parse(text = paste0("returnType(", returnType, "(", param$outputDim, "))"))[[1]]
body(runFun) <- tmp
return(runFun)
}
make_input <- function(dim, size = 3, logicalArg) {
if(!logicalArg) rfun <- rnorm else rfun <- function(n) { rbinom(n, 1, .5) }
if(dim == 0) return(rfun(1))
if(dim == 1) return(rfun(size))
if(dim == 2) return(matrix(rfun(size^2), size))
stop("not set for dimension greater than 2")
}
wrap_if_matches <- function(pattern, string, wrapper, expr) {
if (!is.null(pattern) && any(grepl(paste0('^', pattern, '$'), string))) {
wrapper(expr)
} else {
expr
}
}
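## Usage sketch: wrap an expression in expect_warning only when "R eval"
## is among the expected-warning names (the names here are hypothetical).
if(FALSE) {
expectWarnings <- list('R eval' = TRUE)
wrap_if_matches('R eval', names(expectWarnings), expect_warning, {
warning('expected warning')
})
}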
wrap_if_true <- function(test, wrapper, expr, wrap_in_try = FALSE) {
wrap <- if (isTRUE(wrap_in_try))
function(x) try(x, silent = TRUE)
else identity
if (isTRUE(test)) wrap(wrapper(expr)) else wrap(expr)
}
## This is a parametrized test, where `param` is a list with names:
## param$name - A descriptive test name.
## param$expr - A quoted expression `quote(out <- some_function_of(arg1, arg2, ...))`.
## param$Rcode - Optional R version of expr.
## param$inputDim - A vector of dimensions of the input `arg`s.
## param$outputDim - The dimension of the output `out`.
## param$xfail - Optional regular expression of tests that are expected to fail.
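## Example `param` (hypothetical):
## param <- list(name = 'vector plus vector',
##               expr = quote(out <- arg1 + arg2),
##               inputDim = c(1, 1),
##               outputDim = 1)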
test_math <- function(param, caseName, verbose = nimbleOptions('verbose'), size = 3, dirName = NULL) {
info <- paste0(caseName, ': ', param$name)
## in some cases, expect_error does not suppress error messages (I believe this has
## to do with how we trap errors in compilation), so make sure user realizes expectation
if('knownFailureReport' %in% names(param) && param$knownFailureReport)
cat("\nBegin expected error message:\n")
test_that(info, {
## wrap_if_matches(param$xfail, paste0(info, ': compiles and runs'), expect_error, {
test_math_internal(param, info, verbose, size, dirName)
## })
})
if('knownFailureReport' %in% names(param) && param$knownFailureReport)
cat("End expected error message.\n")
invisible(NULL)
}
test_math_internal <- function(param, info, verbose = nimbleOptions('verbose'), size = 3, dirName = NULL) {
if(verbose) cat("### Testing", param$name, "###\n")
nArgs <- length(param$inputDim)
logicalArgs <- rep(FALSE, nArgs)
if("logicalArgs" %in% names(param))
logicalArgs <- param$logicalArgs
returnType <- "double"
if("returnType" %in% names(param))
returnType <- param$returnType
runFun <- gen_runFun(param, logicalArgs, returnType)
wrap_if_matches(param$expectWarnings, "builds", expect_warning, {
nfR <- nimbleFunction(
run = runFun)
})
info <- paste0(info, ": compiles")
## need expect_error not expect_failure(expect_something()) because otherwise
## R error will stop execution
wrap_if_matches(param$knownFailure, info, expect_error, {
nfC <- compileNimble(nfR, dirName = dirName)
arg1 <- make_input(param$inputDim[1], size = size, logicalArgs[1])
if(nArgs > 1)
arg2 <- make_input(param$inputDim[2], size = size, logicalArgs[2])
if(nArgs > 2)
arg3 <- make_input(param$inputDim[3], size = size, logicalArgs[3])
if(nArgs > 3)
stop("test_math not set up for >3 args yet")
if("Rcode" %in% names(param)) {
eval(param$Rcode)
} else {
eval(param$expr)
}
info <- paste0(info, ": runs")
wrap_if_matches(param$knownFailure, info, expect_failure, {
if(nArgs == 3) {
expect_silent(out_nfR <- nfR(arg1, arg2, arg3))
expect_silent(out_nfC <- nfC(arg1, arg2, arg3))
}
if(nArgs == 2) {
expect_silent(out_nfR <- nfR(arg1, arg2))
expect_silent(out_nfC <- nfC(arg1, arg2))
}
if(nArgs == 1) {
expect_silent(out_nfR <- nfR(arg1))
expect_silent(out_nfC <- nfC(arg1))
}
attributes(out) <- attributes(out_nfR) <- attributes(out_nfC) <- NULL
infoR <- paste0(info, ": R vs Nimble DSL")
wrap_if_matches(param$knownFailure, infoR, expect_failure, {
expect_equal(out, out_nfR, info = infoR)
})
infoC <- paste0(info, ": R vs Nimble Cpp")
wrap_if_matches(param$knownFailure, infoC, expect_failure, {
expect_equal(out, out_nfC, info = infoC)
})
})
# Unload DLL as R doesn't like to have too many loaded.
if(.Platform$OS.type != 'windows') nimble:::clearCompiled(nfR)
})
invisible(NULL)
}
### Function for testing MCMC called from test_mcmc.R
test_mcmc <- function(example, model, data = NULL, inits = NULL, ..., name = NULL, knownFailures = list(), expectWarnings = list(), avoidNestedTest = FALSE) {
## imitate processing test_mcmc_internal just to get a name for the test_that description
if(is.null(name)) {
if(!missing(example)) {
name <- example
} else {
if(is.character(model)) {
name <- model
} else {
name <- 'unnamed case'
}
}
}
name <- basename(name) ## name could be a pathed directory including tempdir(), which would change every time and hence appear as errors in line-by-line comparison with the gold file. So for further purposes we use only the file name.
## `missing(example)` does not work inside the test_that
if(!missing(example)) {
## classic-bugs example specified by name
dir = nimble:::getBUGSexampleDir(example)
if(missing(model)) model <- example
modelKnown <- TRUE
} else {
dir = ""
modelKnown <- !missing(model)
}
if(avoidNestedTest) { ## sometimes test_mcmc is called from within a test_that; this avoids report of empty test as of testthat v2.0.0
expect_true(modelKnown, 'Neither BUGS example nor model code supplied.')
Rmodel <- readBUGSmodel(model, data = data, inits = inits, dir = dir, useInits = TRUE,
check = FALSE)
test_mcmc_internal(Rmodel, ..., name = name, knownFailures = knownFailures, expectWarnings = expectWarnings)
} else {
test_that(name, {
expect_true(modelKnown, 'Neither BUGS example nor model code supplied.')
Rmodel <- readBUGSmodel(model, data = data, inits = inits, dir = dir, useInits = TRUE,
check = FALSE)
test_mcmc_internal(Rmodel, ..., name = name, knownFailures = knownFailures, expectWarnings = expectWarnings)
})
}
}
test_mcmc_internal <- function(Rmodel, ##data = NULL, inits = NULL,
verbose = nimbleOptions('verbose'), numItsR = 5, numItsC = 1000,
basic = TRUE, exactSample = NULL, results = NULL, resultsTolerance = NULL,
numItsC_results = numItsC,
resampleData = FALSE,
topLevelValues = NULL, seed = 0, mcmcControl = NULL, samplers = NULL, removeAllDefaultSamplers = FALSE,
doR = TRUE, doCpp = TRUE, returnSamples = FALSE, name = NULL, knownFailures = list(), expectWarnings = list()) {
# There are three modes of testing:
# 1) basic = TRUE: compares R and C MCMC values and, if requested by passing values in 'exactSample', will compare results to actual samples (you'll need to make sure the seed matches what was used to generate those samples)
# 2) if you pass 'results', it will compare MCMC output to known posterior summaries within tolerance specified in resultsTolerance
# 3) resampleData = TRUE: runs initial MCMC to get top level nodes then simulates from the rest of the model, including data, to get known parameter values, and fits to the new data, comparing parameter estimates from MCMC with the known parameter values
# samplers can be given individually for each node of interest or as a vector of nodes for univariate samplers or list of vectors of nodes for multivariate samplers
# e.g.,
# multiple univar samplers: samplers(type = 'RW', target = c('mu', 'x'))
# single multivar sampler: samplers(type = "RW_block", target = c('x[1]', 'x[2]'))
# single multivar sampler: samplers(type = "RW_block", target = 'x')
# multiple multivar samplers: samplers(type = "RW_block", target = list('x', c('theta', 'mu')))
setSampler <- function(var, conf) {
currentTargets <- sapply(conf$samplerConfs, function(x) x$target)
# remove already defined scalar samplers
inds <- which(unlist(var$target) %in% currentTargets)
conf$removeSamplers(inds, print = FALSE)
# look for cases where one is adding a blocked sampler specified on a variable and should remove scalar samplers for constituent nodes
currentTargets <- sapply(conf$samplerConfs, function(x) x$target)
inds <- which(sapply(unlist(var$target), function(x) Rmodel$expandNodeNames(x)) %in% currentTargets)
conf$removeSamplers(inds, print = FALSE)
if(is.list(var$target) && length(var$target) == 1) var$target <- var$target[[1]]
if(length(var$target) == 1 || (var$type %in% c("RW_block", "RW_PF_block", "RW_llFunction_block") && !is.list(var$target)))
tmp <- conf$addSampler(type = var$type, target = var$target, control = var$control, print = FALSE) else tmp <- sapply(var$target, function(x) conf$addSampler(type = var$type, target = x, control = var$control, print = FALSE))
}
wrap_if_matches('nameOK', names(knownFailures), expect_failure, {
expect_false(is.null(name), info = 'name argument NULL')
})
## leaving this message permanently on for now
cat("===== Starting MCMC test for ", name, ". =====\n", sep = "") ## for log file, for comparison to gold file
system(paste0("echo \"===== Starting MCMC test for ", name, ". =====\n\"", sep = "")) ## for travis log file, so it knows the process is not dead after 10 minutes of silence (message() does not work)
if(doCpp) {
Cmodel <- compileNimble(Rmodel)
}
if(!is.null(mcmcControl)) mcmcConf <- configureMCMC(Rmodel, control = mcmcControl) else mcmcConf <- configureMCMC(Rmodel)
if(removeAllDefaultSamplers) mcmcConf$removeSamplers()
if(!is.null(samplers)) {
sapply(samplers, setSampler, mcmcConf)
cat("Setting samplers to:\n")
print(mcmcConf$getSamplers())
}
vars <- Rmodel$getDependencies(Rmodel$getNodeNames(topOnly = TRUE, stochOnly = TRUE), stochOnly = TRUE, includeData = FALSE, downstream = TRUE)
vars <- unique(nimble:::removeIndexing(vars))
mcmcConf$addMonitors(vars, print = FALSE)
Rmcmc <- buildMCMC(mcmcConf)
if(doCpp) {
Cmcmc <- compileNimble(Rmcmc, project = Rmodel)
}
if(basic) {
## do short runs and compare R and C MCMC output
if(doR) {
set.seed(seed)
R_samples <- NULL
## need expect_error not expect_failure(expect_something()) because otherwise
## R error will stop execution
wrap_if_matches('R MCMC', names(knownFailures), expect_error, {
RmcmcOut <- Rmcmc$run(numItsR)
RmvSample <- nfVar(Rmcmc, 'mvSamples')
R_samples <- as.matrix(RmvSample)
})
}
if(doCpp) {
set.seed(seed)
Cmcmc$run(numItsC)
CmvSample <- nfVar(Cmcmc, 'mvSamples')
C_samples <- as.matrix(CmvSample)
## for some reason columns in different order in CmvSample...
if(doR)
C_subSamples <- C_samples[seq_len(numItsR), attributes(R_samples)$dimnames[[2]], drop = FALSE]
}
if(doR && doCpp && !is.null(R_samples)) {
wrap_if_matches('R C samples match', names(knownFailures), expect_failure, {
expect_equal(R_samples, C_subSamples, info = paste("R and C posterior samples are not equal"))
})
}
if(doCpp) {
if(!is.null(exactSample)) {
for(varName in names(exactSample))
wrap_if_matches('C samples match known samples', names(knownFailures), expect_failure, {
expect_equal(round(C_samples[seq_along(exactSample[[varName]]), varName], 8),
round(exactSample[[varName]], 8),
info = paste0("Equality of compiled MCMC samples and known exact samples for variable ", varName))})
}
}
summarize_posterior <- function(vals)
return(c(mean = mean(vals), sd = sd(vals), quantile(vals, .025), quantile(vals, .975)))
if(doCpp) {
start <- round(numItsC / 2) + 1
try(print(apply(C_samples[start:numItsC, , drop = FALSE], 2, summarize_posterior)))
}
}
## assume doR and doCpp from here down
if(!is.null(results)) {
## do (potentially) longer run and compare results to inputs given
set.seed(seed)
Cmcmc$run(numItsC_results)
CmvSample <- nfVar(Cmcmc, 'mvSamples')
postBurnin <- (round(numItsC_results/2)+1):numItsC_results
C_samples <- as.matrix(CmvSample)[postBurnin, , drop = FALSE]
for(metric in names(results)) {
if(!metric %in% c('mean', 'median', 'sd', 'var', 'cov'))
stop("Results input should be named list with the names indicating the summary metrics to be assessed, from amongst 'mean', 'median', 'sd', 'var', and 'cov'.")
if(metric != 'cov') {
postResult <- apply(C_samples, 2, metric)
for(varName in names(results[[metric]])) {
samplesNames <- dimnames(C_samples)[[2]]
if(!grepl("[", varName, fixed = TRUE))
samplesNames <- gsub("\\[.*\\]", "", samplesNames)
matched <- which(varName == samplesNames)
diff <- abs(postResult[matched] - results[[metric]][[varName]])
for(ind in seq_along(diff)) {
strInfo <- ifelse(length(diff) > 1, paste0("[", ind, "]"), "")
wrap_if_matches(paste('MCMC match to known posterior:', varName, metric, ind), names(knownFailures), expect_failure, {
expect_true(diff[ind] < resultsTolerance[[metric]][[varName]][ind],
info = paste("Test of MCMC result against known posterior for :", metric, "(", varName, strInfo, ")"))
})
}
}
} else { # 'cov'
for(varName in names(results[[metric]])) {
matched <- grep(varName, dimnames(C_samples)[[2]], fixed = TRUE)
postResult <- cov(C_samples[ , matched])
# next bit is on vectorized form of matrix so a bit awkward
diff <- c(abs(postResult - results[[metric]][[varName]]))
for(ind in seq_along(diff)) {
strInfo <- ifelse(length(diff) > 1, paste0("[", ind, "]"), "")
wrap_if_matches(paste('MCMC match to known posterior:', varName, 'cov', ind), names(knownFailures), expect_failure, {
expect_true(diff[ind] < resultsTolerance[[metric]][[varName]][ind],
info = paste("Test of MCMC result against known posterior for:", metric, "(", varName, ")", strInfo))
})
}
}
}
}
}
if(returnSamples) {
if(exists('CmvSample'))
returnVal <- as.matrix(CmvSample)
} else returnVal <- NULL
if(resampleData) {
topNodes <- Rmodel$getNodeNames(topOnly = TRUE, stochOnly = TRUE)
topNodesElements <- Rmodel$getNodeNames(topOnly = TRUE, stochOnly = TRUE,
returnScalarComponents = TRUE)
if(is.null(topLevelValues)) {
postBurnin <- (round(numItsC/2)):numItsC
if(is.null(results) && !basic) {
# need to generate top-level node values so do a basic run
set.seed(seed)
Cmcmc$run(numItsC)
CmvSample <- nfVar(Cmcmc, 'mvSamples')
C_samples <- as.matrix(CmvSample)[postBurnin, ]
}
topLevelValues <- as.list(apply(C_samples[ , topNodesElements, drop = FALSE], 2, mean))
}
if(!is.list(topLevelValues)) {
topLevelValues <- as.list(topLevelValues)
if(!identical(sort(names(topLevelValues)), sort(topNodesElements)))
stop("Values not provided for all top level nodes; possible name mismatch")
}
sapply(topNodesElements, function(x) Cmodel[[x]] <- topLevelValues[[x]])
# check this works as side effect
nontopNodes <- Rmodel$getDependencies(topNodes, self = FALSE, includeData = TRUE, downstream = TRUE, stochOnly = FALSE)
nonDataNodesElements <- Rmodel$getDependencies(topNodes, self = TRUE, includeData = FALSE, downstream = TRUE, stochOnly = TRUE, returnScalarComponents = TRUE)
dataVars <- unique(nimble:::removeIndexing(Rmodel$getDependencies(topNodes, dataOnly = TRUE, downstream = TRUE)))
set.seed(seed)
Cmodel$resetData()
simulate(Cmodel, nontopNodes)
dataList <- list()
for(var in dataVars) {
dataList[[var]] <- values(Cmodel, var)
if(Cmodel$modelDef$varInfo[[var]]$nDim > 1)
dim(dataList[[var]]) <- Cmodel$modelDef$varInfo[[var]]$maxs
}
Cmodel$setData(dataList)
trueVals <- values(Cmodel, nonDataNodesElements)
names(trueVals) <- nonDataNodesElements
set.seed(seed)
Cmcmc$run(numItsC_results)
CmvSample <- nfVar(Cmcmc, 'mvSamples')
postBurnin <- (round(numItsC_results/2)):numItsC_results
C_samples <- as.matrix(CmvSample)[postBurnin, nonDataNodesElements, drop = FALSE]
interval <- apply(C_samples, 2, quantile, c(.025, .975))
interval <- interval[ , names(trueVals)]
covered <- trueVals <= interval[2, ] & trueVals >= interval[1, ]
coverage <- sum(covered) / length(nonDataNodesElements)
tolerance <- 0.15
cat("Coverage for ", name, " is", coverage*100, "%.\n")
miscoverage <- abs(coverage - 0.95)
## always print for purpose of goldfile
# if(miscoverage > tolerance || verbose) {
cat("True values with 95% posterior interval:\n")
print(cbind(trueVals, t(interval), covered))
# }
wrap_if_matches('coverage', names(knownFailures), expect_failure, {
expect_true(miscoverage < tolerance,
info = paste("Test of MCMC coverage on known parameter values for:", name))
})
}
cat("===== Finished MCMC test for ", name, ". =====\n", sep = "")
if(doCpp) {
if(.Platform$OS.type != "windows") {
nimble:::clearCompiled(Rmodel)
}
}
return(returnVal)
}
test_filter <- function(example, model, data = list(), inits = list(),
verbose = nimbleOptions('verbose'), numItsR = 3, numItsC = 10000,
basic = TRUE, exactSample = NULL, results = NULL, resultsTolerance = NULL,
numItsC_results = numItsC,
seed = 0, filterType = NULL, latentNodes = NULL, filterControl = NULL,
doubleCompare = FALSE, filterType2 = NULL,
doR = TRUE, doCpp = TRUE, returnSamples = FALSE, name = NULL, dirName = NULL,
knownFailures = list()) {
## There are two modes of testing:
## 1) basic = TRUE: compares R and C Particle Filter likelihoods and sampled states
## 2) if you pass 'results', it will compare Filter output to known latent state posterior summaries, top-level parameter posterior summaries,
## and likelihoods within tolerance specified in resultsTolerance. Results are compared for both weighted and unweighted samples.
## filterType determines which filter to use for the model. Valid options are: "bootstrap", "auxiliary", "LiuWest", "ensembleKF"
## filterControl specifies options to filter function, such as saveAll = TRUE/FALSE.
if(is.null(name)) {
if(!missing(example)) {
name <- example
} else {
if(is.character(model)) {
name <- model
} else {
name <- 'unnamed case'
}
}
}
## keep this outside of test_that as use of missing within test_that triggers error with "'missing' can
## only be used for arguments"
if(!missing(example)) {
## classic-bugs example specified by name
dir <- getBUGSexampleDir(example)
if(missing(model)) model <- example
} else {
## code, data and inits specified directly where 'model' contains the code
example <- deparse(substitute(model))
if(missing(model)) stop("Neither BUGS example nor model code supplied.")
dir <- ""
}
returnVal <- NULL
cat("===== Starting Filter test for ", name, " using ", filterType, ". =====\n", sep = "")
test_that(name, {
Rmodel <- readBUGSmodel(model, dir = dir, data = data, inits = inits, useInits = TRUE, check = FALSE)
if(doCpp) {
Cmodel <- compileNimble(Rmodel, dirName = dirName)
if(verbose) cat('done compiling model\n')
}
if(verbose) cat("Building filter\n")
if(filterType == "bootstrap"){
if(!is.null(filterControl)) Rfilter <- buildBootstrapFilter(Rmodel, nodes = latentNodes, control = filterControl)
else Rfilter <- buildBootstrapFilter(Rmodel, nodes = latentNodes, control = list(saveAll = TRUE, thresh = 0))
}
if(filterType == "auxiliary"){
if(!is.null(filterControl)) Rfilter <- buildAuxiliaryFilter(Rmodel, nodes = latentNodes, control = filterControl)
else Rfilter <- buildAuxiliaryFilter(Rmodel, nodes = latentNodes, control = list(saveAll = TRUE))
}
if(filterType == "LiuWest"){
if(!is.null(filterControl)) Rfilter <- buildLiuWestFilter(Rmodel, nodes = latentNodes, control = filterControl)
else Rfilter <- buildLiuWestFilter(Rmodel, nodes = latentNodes, control = list(saveAll = TRUE))
}
if(filterType == "ensembleKF"){
if(!is.null(filterControl)) Rfilter <- buildEnsembleKF(Rmodel, nodes = latentNodes, control = filterControl)
else Rfilter <- buildEnsembleKF(Rmodel, nodes = latentNodes, control = list(saveAll = TRUE))
}
saveAll <- TRUE
if(!is.null(filterControl) && exists('saveAll', filterControl))
saveAll <- filterControl$saveAll
if(doCpp) {
Cfilter <- compileNimble(Rfilter, project = Rmodel, dirName = dirName)
}
if(basic) {
## do short runs and compare R and C filter output
if(doR) {
set.seed(seed)
RfilterOut <- Rfilter$run(numItsR)
if(filterType == "ensembleKF"){
RmvSample <- nfVar(Rfilter, 'mvSamples')
R_samples <- as.matrix(RmvSample)
}
else{
RmvSample <- nfVar(Rfilter, 'mvWSamples')
RmvSample2 <- nfVar(Rfilter, 'mvEWSamples')
R_samples <- as.matrix(RmvSample)
R_samples2 <- as.matrix(RmvSample2)
if(filterType != 'LiuWest'){
R_ESS <- Rfilter$returnESS()
}
}
}
if(doCpp) {
set.seed(seed)
CfilterOut <- Cfilter$run(numItsR)
if(filterType == "ensembleKF"){
CmvSample <- nfVar(Cfilter, 'mvSamples')
C_samples <- as.matrix(CmvSample)
C_subSamples <- C_samples[, attributes(R_samples)$dimnames[[2]], drop = FALSE]
}
else{
CmvSample <- nfVar(Cfilter, 'mvWSamples')
CmvSample2 <- nfVar(Cfilter, 'mvEWSamples')
C_samples <- as.matrix(CmvSample)
C_samples2 <- as.matrix(CmvSample2)
C_subSamples <- C_samples[, attributes(R_samples)$dimnames[[2]], drop = FALSE]
C_subSamples2 <- C_samples2[, attributes(R_samples2)$dimnames[[2]], drop = FALSE]
if(filterType != 'LiuWest'){
C_ESS <- Cfilter$returnESS()
for(i in seq_along(C_ESS)){
wrap_if_matches('C ESS >= 0', names(knownFailures), expect_failure, {
expect_gte(C_ESS[i], 0)
})
wrap_if_matches('C ESS <= numIts', names(knownFailures), expect_failure, {
expect_lte(C_ESS[i], numItsR)
})
}
}
}
## for some reason columns in different order in CmvSample...
}
if(doR && doCpp && !is.null(R_samples)) {
## context(paste0("testing ", example," ", filterType, " filter"))
if(filterType == "ensembleKF"){
expect_equal(R_samples, C_subSamples, info = paste("R and C posterior samples are not equal"))
}
else{
expect_equal(R_samples, C_subSamples, info = paste("R and C weighted posterior samples are not equal"))
expect_equal(R_samples2, C_subSamples2, info = paste("R and C equally weighted posterior samples are not equal"))
expect_equal(RfilterOut, CfilterOut, info = paste("R and C log likelihood estimates are not equal"))
if(filterType != 'LiuWest'){
wrap_if_matches('R C ESS match', names(knownFailures), expect_failure, {
expect_equal(R_ESS, C_ESS, info = paste("R and C ESS are not equal"))
})
}
}
}
if(doCpp) {
if(!is.null(exactSample)) {
for(varName in names(exactSample))
expect_equal(round(C_samples[seq_along(exactSample[[varName]]), varName], 8), round(exactSample[[varName]], 8), info = paste0("filter result does not match known samples for: ", varName))
}
}
summarize_posterior <- function(vals)
return(c(mean = mean(vals), sd = sd(vals), quantile(vals, .025), quantile(vals, .975)))
if(doCpp) {
## if(verbose) {
## try(print(apply(C_samples[, , drop = FALSE], 2, summarize_posterior)))
## }
}
}
## assume doR and doCpp from here down
if(!is.null(results)) {
## do (potentially) longer run and compare results to inputs given
set.seed(seed)
Cll <- Cfilter$run(numItsC_results)
for(wMetric in c(TRUE, FALSE)){
weightedOutput <- 'unweighted'
if(filterType == "ensembleKF")
CfilterSample <- nfVar(Cfilter, 'mvSamples')
else{
if(wMetric){
CfilterSample <- nfVar(Cfilter, 'mvWSamples')
weightedOutput <-"weighted"
}
else
CfilterSample <- nfVar(Cfilter, 'mvEWSamples')
}
C_samples <- as.matrix(CfilterSample)[, , drop = FALSE]
if(weightedOutput == "weighted"){
wtIndices <- grep("^wts\\[", dimnames(C_samples)[[2]])
C_weights <- as.matrix(C_samples[,wtIndices, drop = FALSE])
C_samples <- as.matrix(C_samples[,-wtIndices, drop = FALSE])
}
latentNames <- Rmodel$expandNodeNames(latentNodes, sort = TRUE, returnScalarComponents = TRUE)
if(weightedOutput == "weighted"){
samplesToWeightsMatch <- rep(dim(C_weights)[2], dim(C_samples)[2])
latentIndices <- match(latentNames, dimnames(C_samples)[[2]])
latentSampLength <- length(latentNames)
if(!saveAll) { ## added without careful checking; may not be robust
latentIndices <- latentIndices[!is.na(latentIndices)]
latentSampLength <- 1
}
latentDim <- latentSampLength/dim(C_weights)[2]
samplesToWeightsMatch[latentIndices] <- rep(1:dim(C_weights)[2], each = latentDim )
}
for(metric in names(results)) {
if(!metric %in% c('mean', 'median', 'sd', 'var', 'cov', 'll'))
stop("Results input should be named list with the names indicating the summary metrics to be assessed, from amongst 'mean', 'median', 'sd', 'var', 'cov', and 'll'.")
if(!(metric %in% c('cov', 'll'))) {
if(weightedOutput == "weighted"){
postResult <- sapply(1:dim(C_samples)[2], weightedMetricFunc, metric = metric, weights = C_weights, samples = C_samples, samplesToWeightsMatch)
}
else
postResult <- apply(C_samples, 2, metric)
for(varName in names(results[[metric]])) {
samplesNames <- dimnames(C_samples)[[2]]
if(!grepl("[", varName, fixed = TRUE))
samplesNames <- gsub("\\[.*\\]", "", samplesNames)
matched <- which(varName == samplesNames)
if(!saveAll) { ## added without careful checking; may not be robust
diff <- abs(postResult[matched] - results[[metric]][[varName]][length(results[[metric]][[varName]])])
} else {
diff <- abs(postResult[matched] - results[[metric]][[varName]])
}
for(ind in seq_along(diff)) {
strInfo <- ifelse(length(diff) > 1, paste0("[", ind, "]"), "")
expect_lt(diff[ind], resultsTolerance[[metric]][[varName]][ind],
label = paste0("filter posterior result against known posterior for:", weightedOutput, metric, "(", varName, strInfo, ")"))
}
}
} else if (metric == 'cov' ) {
for(varName in names(results[[metric]])) {
matched <- grep(varName, dimnames(C_samples)[[2]], fixed = TRUE)
## if(weightedOutput == "weighted")
## postResult <- cov.wt(C_samples[, matched], wt = ) #weighted covariance not currently implemented
## else
postResult <- cov(C_samples[ , matched])
## next bit is on vectorized form of matrix so a bit awkward
diff <- c(abs(postResult - results[[metric]][[varName]]))
for(ind in seq_along(diff)) {
strInfo <- ifelse(length(diff) > 1, paste0("[", ind, "]"), "")
expect_lt(diff[ind], resultsTolerance[[metric]][[varName]][ind],
label = paste0("filter posterior result against known posterior for", example, ":", metric, "(", varName, ")", strInfo))
}
}
}
else { # ll (log likelihood)
diff <- abs(Cll - results[[metric]][[1]][1])
expect_lt(diff, resultsTolerance[[metric]][[1]][1], label = paste0("filter log-likelihood result against known log-likelihood for", example, ":", metric))
}
}
}
}
try(print(apply(as.matrix(C_samples), 2, summarize_posterior))) ## print summaries of equally weighted samples
if(returnSamples) {
if(exists('CmvSample'))
returnVal <- as.matrix(CmvSample)
}
if(doCpp) {
if(.Platform$OS.type != 'windows')
nimble:::clearCompiled(Rmodel)
}
})
cat("===== Finished ", filterType, " filter test for ", name, ". =====\n", sep = "")
return(returnVal)
}
## Testing for correct behavior of different resampling methods
## used within PFs.
## samplerName - A string with the name of
## the resampling function to be tested.
## wtsList - A list, where each element is a vector of weights to use for
## testing, given as input to the resampler functions.
## reps - An integer, the number of repetitions to conduct.
##
## For each provided set of weights in wtsList, the test will produce
## 'reps' number of samples according to those weights. For each set of
## samples, the number of times each element was sampled is recorded. These
## recorded counts are averaged over the 'reps' number of samples, and then
## the averages are compared to the expected count of each element
## (i.e. wts*length(wts)).
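## Usage sketch (the resampler name here is hypothetical; actual names are
## those of the resampler nimbleFunctions defined in the nimble namespace):
## test_resampler('systematicResamplerFunction', wtsList = list(c(.1, .3, .6)), reps = 100)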
test_resampler <- function(samplerName, wtsList, reps = 500, creps = reps){
n <- sapply(wtsList, function(x){return(length(x))})
output <- lapply(n, function(x){return(numeric(x))})
avgCounts <- output
samplerFunction <- getFromNamespace(samplerName, 'nimble')()
for(rep in 1:reps){
counts <- list()
for(i in 1:length(wtsList)){
output[[i]] <- samplerFunction$run(wtsList[[i]])
counts[[i]] <- numeric(length(output[[i]]))
for(j in 1:n[i]){
counts[[i]][j] <- length(which(output[[i]] == j))
}
avgCounts[[i]] <- avgCounts[[i]] + counts[[i]]
}
}
expectedValue <- vector('list', length(wtsList))
for(i in 1:length(wtsList)){
avgCounts[[i]] <- avgCounts[[i]]/reps
expectedValue[[i]] <- n[i]*(wtsList[[i]]/sum(wtsList[[i]]))
diffVec <- abs(expectedValue[[i]] - avgCounts[[i]])
for(j in 1:n[i]){
test_that(paste0("Test of accurate samples for uncompiled resampling
method ", samplerName, ", weight set ", i,
", weight number ", j),
expect_lt(diffVec[j], sqrt(expectedValue[[i]][j]) + .01))
}
}
avgCounts <- lapply(n, function(x){return(numeric(x))})
compiledSamplerFunction <- compileNimble(samplerFunction)
for(rep in 1:reps){
counts <- list()
for(i in 1:length(wtsList)){
output[[i]] <- compiledSamplerFunction$run(wtsList[[i]])
counts[[i]] <- numeric(length(output[[i]]))
for(j in 1:n[i]){
counts[[i]][j] <- length(which(output[[i]] == j))
}
avgCounts[[i]] <- avgCounts[[i]] + counts[[i]]
}
}
expectedValue <- vector('list', length(wtsList))
for(i in 1:length(wtsList)){
avgCounts[[i]] <- avgCounts[[i]]/reps
expectedValue[[i]] <- n[i]*(wtsList[[i]]/sum(wtsList[[i]]))
diffVec <- abs(expectedValue[[i]] - avgCounts[[i]])
for(j in 1:n[i]){
test_that(paste0("Test of accurate samples for compiled resampling
method ", samplerName, ", weight set ", i,
", weight number ", j),
expect_lt(diffVec[j], sqrt(expectedValue[[i]][j]) + .01))
}
}
}
weightedMetricFunc <- function(index, samples, weights, metric, samplesToWeightsMatch){
samples <- samples[,index]
weights <- exp(weights[,samplesToWeightsMatch[index]])/sum(exp(weights[,samplesToWeightsMatch[index]]))
if(metric == "median"){
ord <- order(samples)
weights <- weights[ord]
samples <- samples[ord]
sumWts <- 0
while(sumWts < .5){
sumWts <- sumWts + weights[1]
weights <- weights[-1]
}
return(samples[length(samples)-length(weights)])
}
wtMean <- weighted.mean(samples, weights)
if(metric == "mean"){
return(wtMean)
}
wtVar <- sum(weights*(samples - wtMean)^2)
if(metric == "var"){
return(wtVar)
}
if(metric == "sd"){
return(sqrt(wtVar))
}
}
test_size <- function(input, verbose = nimbleOptions('verbose')) {
if(is.null(input$expectPassWithConst)) input$expectPassWithConst <- input$expectPass
if(is.null(input$knownProblem)) input$knownProblem <- FALSE
if(is.null(input$knownProblemWithConst)) input$knownProblemWithConst <- input$knownProblem
if(is.null(input$expectWarn)) input$expectWarn <- FALSE
if(is.null(input$expectWarnWithConst)) input$expectWarnWithConst <- input$expectWarn
if(verbose) cat("### Testing", input$name, " with RHS variable ###\n")
code <- quote({
m <- nimbleModel(code = input$expr, data = input$data, inits = input$inits)
calculate(m) ## Calculates from scratch.
calculate(m) ## Uses cached value.
})
message = paste(input$name, 'with RHS variable', ifelse(input$expectPass, 'works', 'fails'), 'as expected')
if (input$knownProblem) message = paste(message, 'marked as KNOWN ISSUE')
if(xor(input$expectPass, input$knownProblem)) {
if(input$expectWarn) {
test_that(message, expect_warning(eval(code)))
} else test_that(message, expect_silent(eval(code)))
} else {
test_that(message, expect_error(suppressWarnings(eval(code))))
}
if(verbose) cat("### Testing", input$name, "with RHS constant ###\n")
code <- quote({
m <- nimbleModel(code = input$expr, data = input$data, constants = input$inits)
calculate(m) ## Calculates from scratch.
calculate(m) ## Uses cached value.
})
message = paste(input$name, 'with RHS constant', ifelse(input$expectPassWithConst, 'works', 'fails'), 'as expected')
if (input$knownProblemWithConst) message = paste(message, 'marked as KNOWN ISSUE')
if(xor(input$expectPassWithConst, input$knownProblemWithConst)) {
if(input$expectWarnWithConst) {
test_that(message, expect_warning(eval(code)))
} else test_that(message, expect_silent(eval(code)))
} else {
## As of testthat 3.0, warnings bubble up so need to deal with them by suppression.
test_that(message, expect_error(suppressWarnings(eval(code))))
}
invisible(NULL)
}
# could redo test_size to always expect specific error, but not taking time to do that now
test_size_specific_error <- function(input, verbose = nimbleOptions('verbose')) {
if(verbose) cat("### Testing", input$name, "###\n")
test_that(paste0("Test 1 of size/dimension check: ", input$name), {
expect_error(nimbleModel(code = input$expr, data = input$data, inits = input$inits),
regexp = input$correctErrorMsg, info = paste("Result does not match", input$expectPass))
})
invisible(NULL)
}
test_getParam <- function(distCall, dist = NULL) {
distCallText <- deparse(distCall)
test_that(distCallText, {
gpScalar <- nimbleFunction(
setup = function(model, node, param) {},
run = function() {
ans1 <- model$getParam(node, param)
ans2 <- getParam(model, node, param) ## to become model$getParam(node, param)
if(ans1 != ans2) stop('oops, ans1 != ans2')
return(ans1)
returnType(double())
})
if(is.null(dist)) dist <- nimble:::getDistributionInfo(as.character(distCall[[1]])) else dist <- nimble:::getDistributionInfo(dist)
code <- substitute({x ~ DISTCALL}, list(DISTCALL = distCall))
m <- nimbleModel( code = code )
cm <- compileNimble(m)
gpFuns <- list()
expectedResults <- list()
altParams <- dist$altParams
altParamNames <- names(altParams)
distCallText <- deparse(distCall)
reqdArgs <- dist$reqdArgs ## these are canonical
exprs <- dist$exprs
alts <- dist$alts
providedArgs <- names(distCall)
providedArgs <- providedArgs[providedArgs != ""]
whichExpr <- NULL
## figure out which way arguments were provided in distCall
for(i in seq_along(exprs)) {
if(all(providedArgs %in% alts[[i]])) whichExpr <- i
}
if(is.null(whichExpr)) {
if(all(providedArgs %in% reqdArgs)) whichExpr <- 0
}
expect_false(is.null(whichExpr), info = 'args not found')
## exprs give expressions for calculating reqdArgs from alts
## altParams give expressions for calculating individual alts from reqdArgs
## put reqd in evalEnv, which means using exprs for the alts as needed
## if testing on something provided, grab what was provided.
## if testing on something not provided, if it is reqd then use it directly
## otherwise calculate it from altParams
evalEnv <- new.env()
for(i in seq_along(distCall)) {
if(names(distCall)[i] != "") assign(names(distCall)[i], distCall[[i]], envir = evalEnv)
}
if(whichExpr > 0) { ## what was provided was not canonical
for(i in seq_along(exprs[[whichExpr]])) {
assign(names(exprs[[whichExpr]])[i], eval(exprs[[whichExpr]][[i]], envir = evalEnv), envir = evalEnv)
}
}
## check recovery of alternative param names from what was provided
for(i in seq_along(altParamNames)) {
gpFuns[[i]] <- gpScalar(m, 'x', altParamNames[i])
if(altParamNames[i] %in% providedArgs) ## it was provided so simply eval the name
expectedResults[[i]] <- eval(as.name(altParamNames[i]), envir = evalEnv)
else ## it wasn't provided so eval the expression to calculate it from reqdArgs
expectedResults[[i]] <- eval(altParams[[i]], envir = evalEnv)
expect_equal(gpFuns[[i]]$run(), expectedResults[[i]], info = paste('error in uncompiled use',
altParamNames[i]))
expect_equal(m$getParam('x', altParamNames[i]), expectedResults[[i]],
info = paste('error in R getParam', altParamNames[i]))
expect_equal(cm$getParam('x', altParamNames[i]), expectedResults[[i]],
info = paste('error in C getParam', altParamNames[i]))
}
resultsNames <- altParamNames
nextI <- length(expectedResults)+1
for(i in seq_along(reqdArgs)) {
gpFuns[[nextI]] <- gpScalar(m, 'x', reqdArgs[i])
expectedResults[[nextI]] <- eval(as.name(reqdArgs[i]), envir = evalEnv) ## it was already calculated into evalEnv above
expect_equal(gpFuns[[nextI]]$run(), expectedResults[[nextI]],
info = paste('error in uncompiled reqd', reqdArgs[i]))
expect_equal(m$getParam('x', reqdArgs[i]), expectedResults[[nextI]],
info = paste('error in R model reqd', reqdArgs[i]))
expect_equal(cm$getParam('x', reqdArgs[i]), expectedResults[[nextI]],
info = paste('error in C model reqd', reqdArgs[i]))
resultsNames[nextI] <- reqdArgs[i]
nextI <- nextI + 1
}
compiled <- do.call('compileNimble', c(list(m), gpFuns, list(resetFunctions = TRUE)))
for(i in seq_along(expectedResults)) {
expect_equal(compiled[[i+1]]$run(), expectedResults[[i]],
info = paste('error in compiled', resultsNames[i]))
}
if(.Platform$OS.type != 'windows') nimble:::clearCompiled(m)
})
invisible(NULL)
}
test_getBound <- function(model, cmodel, test, node, bnd, truth, info) {
test_that(paste0("getBound test: ", info), {
rtest <- test(model, node, bnd)
project <- nimble:::nimbleProjectClass(NULL, name = 'foo')
ctest <- compileNimble(rtest, project = project)
out1 <- model$getBound(node, bnd)
out2 <- getBound(model, node, bnd)
out3 <- cmodel$getBound(node, bnd)
out4 <- getBound(cmodel, node, bnd)
nfOutput <- rtest$run()
cnfOutput <- ctest$run()
expect_equal(truth, out1, info = paste0("mismatch of true bound with getBound result: ", info))
expect_equal(out1, out2, info = paste0("function vs. method getBound call mismatch for uncompiled model with: ", info))
expect_equal(out1, out2, info = paste0("function vs. method getBound call mismatch for compiled model with: ", info))
expect_equal(out1, out3, info = paste0("uncompiled vs. compiled getBound call mismatch with: ", info))
expect_equal(out1, nfOutput[1], info = paste0("direct vs. nimbleFunction getBound call mismatch for uncompiled model with: ", info))
expect_equal(nfOutput[1], nfOutput[2], info = paste0("function vs. method getBound call mismatch for uncompiled nimbleFunction with: ", info))
expect_equal(out3, cnfOutput[1], info = paste0("direct vs. nimbleFunction getBound call mismatch for compiled model with: ", info))
expect_equal(cnfOutput[1], cnfOutput[2], info = paste0("function vs. method getBound call mismatch for compiled nimbleFunction with: ", info))
# if(.Platform$OS.type != 'windows') dyn.unload(project$cppProjects[[1]]$getSOName())
})
invisible(NULL)
}
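## Example: expandNames('x', 1:2, 1:2) returns
## c("x[1, 1]", "x[1, 2]", "x[2, 1]", "x[2, 2]").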
expandNames <- function(var, ...) {
tmp <- as.matrix(expand.grid(...))
indChars <- apply(tmp, 1, paste0, collapse=', ')
## Use of sort here only puts names in the same order as NIMBLE if there are no double-digit indexes.
sort(paste0(var, "[", indChars, "]"))
}
test_dynamic_indexing_model <- function(param) {
test_that(param$case, test_dynamic_indexing_model_internal(param))
invisible(NULL)
}
test_dynamic_indexing_model_internal <- function(param) {
if(!is.null(param$expectError) && param$expectError) {
expect_error(m <- nimbleModel(param$code, dimensions = param$dims, inits = param$inits, data = param$data, constants = param$constants), param$expectErrorMsg, info = "expected error not generated")
} else {
m <- nimbleModel(param$code, dimensions = param$dims, inits = param$inits, data = param$data, constants = param$constants)
expect_true(inherits(m, 'modelBaseClass'), info = "problem creating model")
for(i in seq_along(param$expectedDeps))
expect_identical(m$getDependencies(param$expectedDeps[[i]]$parent, stochOnly = TRUE),
param$expectedDeps[[i]]$result, info = paste0("dependencies don't match expected in dependency of ", param$expectedDeps[[i]]$parent))
cm <- compileNimble(m)
expect_true(is.Cmodel(cm), info = "compiled model object improperly formed")
expect_identical(calculate(m), calculate(cm), info = "problem with R vs. C calculate with initial indexes")
for(i in seq_along(param$validIndexes)) {
for(j in seq_along(param$validIndexes[[i]]$var)) {
m[[param$validIndexes[[i]]$var[j]]] <- param$validIndexes[[i]]$value[j]
cm[[param$validIndexes[[i]]$var[j]]] <- param$validIndexes[[i]]$value[j]
}
expect_true(is.numeric(calculate(cm)), info = paste0("problem with C calculate with valid indexes, case: ", i))
expect_true(is.numeric(calculateDiff(cm)), info = paste0("problem with C calculateDiff with valid indexes, case: ", i))
expect_identical(calculate(m), calculate(cm), info = paste0("problem with R vs. C calculate with valid indexes, case: ", i))
deps <- m$getDependencies(param$invalidIndexes[[i]]$var, self = FALSE)
expect_true(is.null(simulate(cm, deps, includeData = TRUE)), info = paste0("problem with C simulate with valid indexes, case: ", i))
## Reset values so the models have the same values for comparisons in later iterations.
cm$setInits(param$inits)
cm$setData(param$data)
cm$calculate()
}
for(i in seq_along(param$invalidIndexes)) {
for(j in seq_along(param$invalidIndexes[[i]]$var)) {
m[[param$invalidIndexes[[i]]$var[j]]] <- param$invalidIndexes[[i]]$value[j]
cm[[param$invalidIndexes[[i]]$var[j]]] <- param$invalidIndexes[[i]]$value[j]
}
expect_output(out <- calculate(m), "Dynamic index out of bounds", info = paste0("problem with lack of warning in R calculate with non-NA invalid indexes, case: ", i))
expect_equal(out, NaN, info = paste0("problem with lack of NaN in R calculate with non-NA invalid indexes, case: ", i))
expect_output(out <- calculate(cm), "Dynamic index out of bounds", info = paste0("problem with lack of warning in C calculate with invalid indexes, case: ", i))
expect_equal(out, NaN, info = paste0("problem with lack of NaN in C calculate with invalid indexes, case: ", i))
expect_output(out <- calculateDiff(cm), "Dynamic index out of bounds", info = paste0("problem with lack of warning in C calculateDiff with invalid indexes, case: ", i))
expect_equal(out, NaN, info = paste0("problem with lack of NaN in C calculateDiff with invalid indexes, case: ", i))
deps <- m$getDependencies(param$invalidIndexes[[i]]$var, self = FALSE)
expect_output(simulate(cm, deps, includeData = TRUE), "Dynamic index out of bounds", info = paste0("problem with lack of warning in C simulate with invalid indexes, case: ", i))
expect_true(sum(is.nan(values(cm, deps))) >= 1, info = paste0("problem with lack of NaN in C simulate with invalid indexes, case: ", i))
}
if(.Platform$OS.type != "windows") {
nimble:::clearCompiled(m)
}
}
invisible(NULL)
}
## utilities for saving test output to a reference file
## and making the test a comparison of the file
clearOldOutput <- function(filename) {
if(file.exists(filename)) file.remove(filename)
}
appendOutput <- function(filename, case, caseName, casePrefix = "") {
outputConnection <- file(filename, open = 'at')
writeLines(caseName, con = outputConnection)
outputAns <- lapply(case, function(x) writeLines(paste0(casePrefix, paste(x, collapse = " ")), con = outputConnection))
close(outputConnection)
}
writeOutput <- function(cases, filename) {
clearOldOutput(filename)
for(i in seq_along(cases)) appendOutput(filename, cases[[i]], names(cases)[i], casePrefix = paste0(i,": "))
}
stripTestPlacementWarning <- function(lines) {
## deal with Placing tests in `inst/tests/` is deprecated warning
## as it doesn't seem entirely predictable when/where it appears
coreLines <- grep("^Placing tests in", lines)
addedLines <- lines[coreLines-1] == "In addition: Warning message:"
totalLines <- c(coreLines-addedLines, coreLines)
if(length(totalLines))
return(lines[-totalLines]) else return(lines)
}
stripTestsPassedMessage <- function(lines) {
stripLines <- grep("^Test passed", lines)
if(length(stripLines))
return(lines[-stripLines]) else return(lines)
}
compareFilesByLine <- function(trialResults, correctResults, main = "") {
trialResults <- stripTestsPassedMessage(stripTestPlacementWarning(trialResults))
correctResults <- stripTestPlacementWarning(correctResults)
expect_equal(length(trialResults), length(correctResults))
linesToTest <- min(length(trialResults), length(correctResults))
mapply(function(lineno, trialLine, correctLine) {
expect_identical(trialLine, correctLine)
}, 1:linesToTest, trialResults[1:linesToTest], correctResults[1:linesToTest])
invisible(NULL)
}
#### process test-mcmc.R output
##trialResults <- readLines(tempFileName)
##trialResults <- trialResults[grep('Error in x$.self$finalize() : attempt to apply non-function', trialResults, invert = TRUE, fixed = TRUE)]
##newResults <- trialResults
##i <- 1
##while(i <= length(newResults)) {
## if(grepl('Test passed', newResults[i])) {
## if(i < length(newResults)) {
## newResults[i] <- paste0(gsub('Test passed.*', '', newResults[i]), newResults[i+1])
## newResults <- newResults[-(i+1)]
## } else {
## ## i == length(newResults)
## if(grepl('^Test passed', newResults[i])) {
## newResults <- newResults[-i]
## } else {
## newResults[i] <- gsub('Test passed.*', '', newResults[i])
## }
## }
## } else {
## i <- i + 1
## }
##}
##writeLines(newResults, FILENAME)
compareFilesUsingDiff <- function(trialFile, correctFile, main = "") {
if(main == "") main <- paste0(trialFile, ' and ', correctFile, ' do not match\n')
diffOutput <- system2('diff', c(trialFile, correctFile), stdout = TRUE)
test_that(paste0(main, paste0(diffOutput, collapse = '\n')),
expect_true(length(diffOutput) == 0)
)
invisible(NULL)
}
## Create a nimbleFunction parametrization to be passed to gen_runFunCore().
##
## op:          An operator string.
##
## argTypes:    A character vector of argTypes (e.g., "double(0)").
##              If this is a named vector, the names are used as the formals
##              of the constructed op call. If argTypes has the attribute
##              "includesReturnType" set to TRUE, it is instead a list whose
##              first element is the argTypes vector and whose second element
##              is the returnType to use.
##
## more_args:   A named list, e.g. list(log = 1), whose elements are added to
##              the constructed call but are not part of args.
##
## outer_code:  Quoted code such as quote(Y^2), applied with Y substituted by
##              the operation result. E.g., if the operation is `arg1 + arg2`,
##              this would replace it with (arg1 + arg2)^2.
##
## inner_codes: A list of quoted code such as quote(X^2), applied with X
##              substituted by the corresponding argument. The list must be as
##              long as the number of arguments, with NULL entries indicating
##              no substitution. E.g., with list(NULL, quote(X^2)),
##              `arg1 + arg2` would be changed to `arg1 + arg2^2`.
make_op_param <- function(op, argTypes, more_args = NULL,
outer_code = NULL, inner_codes = NULL) {
returnTypeProvided <- NULL
if(isTRUE(attr(argTypes, "includesReturnType"))) {
returnTypeProvided <- argTypes[[2]]
argTypes <- argTypes[[1]]
}
arg_names <- names(argTypes)
if (is.null(arg_names)) {
arg_names <- paste0('arg', seq_along(argTypes))
op_args <- lapply(arg_names, as.name)
} else {
op_args <- sapply(arg_names, as.name, simplify = FALSE)
}
args_string <- paste0(arg_names, ' = ', argTypes, collapse = ' ')
name <- paste(op, args_string)
# Add inner funs around arguments
if(!is.null(inner_codes)) {
for(i in seq_along(op_args)) {
if(!is.null(inner_codes[[i]])) {
op_args[[i]] <- eval(substitute(
substitute(INNER_CODE,
list(X = op_args[[i]])),
list(INNER_CODE = inner_codes[[i]]))
)
}
}
}
this_call <- as.call(c(
substitute(FOO, list(FOO = as.name(op))),
op_args, more_args
))
if(is.null(outer_code)) {
expr <- substitute(
out <- THIS_CALL,
list(THIS_CALL = this_call)
)
} else {
expr <- eval(substitute(
substitute(
out <- OUTER_CODE,
list(Y = this_call)),
list(OUTER_CODE = outer_code))
)
}
argTypesList <- as.list(argTypes)
names(argTypesList) <- arg_names
argTypesList <- lapply(argTypesList, function(arg) {
parse(text = arg)[[1]]
})
if(is.null(returnTypeProvided))
outputType <- return_type_string(op, argTypes)
else
outputType <- returnTypeProvided
list(
name = name,
expr = expr,
args = argTypesList,
outputType = parse(text = outputType)[[1]]
)
}
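## Example (illustrative, not run): a parametrization for `arg1 + arg2` on
## length-3 vectors, suitable for gen_runFunCore().
## param <- make_op_param('+', c('double(1, 3)', 'double(1, 3)'))
## runFun <- gen_runFunCore(param)
## With outer_code and inner_codes, the constructed call becomes (arg1 + arg2^2)^2:
## param2 <- make_op_param('+', c('double(1, 3)', 'double(1, 3)'),
##                         outer_code = quote(Y^2),
##                         inner_codes = list(NULL, quote(X^2)))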
## Takes an operator and its input types as a character vector and
## creates a string representing the returnType for the operation.
##
## op:       An operator string.
## argTypes: A character vector of argTypes (e.g., "double(0)").
##
return_type_string <- function(op, argTypes) {
## Multivariate distribution ops do not support recycling-rule behavior,
## so their return type is always double(0).
mvdist_ops <- names(nimble:::sizeCalls)[nimble:::sizeCalls == 'sizeScalarRecurseAllowMaps']
if(op %in% mvdist_ops)
return("double(0)")
## see ops handled by eigenize_recyclingRuleFunction in genCpp_eigenization.R
recycling_rule_ops <- c(
nimble:::scalar_distribution_dFuns,
nimble:::scalar_distribution_pFuns,
nimble:::scalar_distribution_qFuns,
nimble:::scalar_distribution_rFuns,
paste0(c('d', 'q', 'p', 'r'), 't'),
paste0(c('d', 'q', 'p', 'r'), 'exp'),
'bessel_k'
)
returnTypeCode <- nimble:::returnTypeHandling[[op]]
if (is.null(returnTypeCode))
if (!op %in% recycling_rule_ops)
return(argTypes[1])
else returnTypeCode <- 1
scalarTypeString <- switch(
returnTypeCode,
'double', ## 1
'integer', ## 2
'logical' ## 3
)
args <- lapply(
argTypes, function(argType)
nimble:::argType2symbol(parse(text = argType)[[1]])
)
if (is.null(scalarTypeString)) ## returnTypeCode is 4 or 5
scalarTypeString <-
if (length(argTypes) == 1)
if (returnTypeCode == 5 && args[[1]]$type == 'logical') 'integer'
else args[[1]]$type
else if (length(argTypes) == 2) {
aot <- nimble:::arithmeticOutputType(args[[1]]$type, args[[2]]$type)
if (returnTypeCode == 5 && aot == 'logical') 'integer'
else aot
}
reductionOperators <- c(
nimble:::reductionUnaryOperators,
nimble:::matrixSquareReductionOperators,
nimble:::reductionBinaryOperatorsEither,
'dmulti'
)
nDim <- if (op %in% reductionOperators) 0
else max(sapply(args, `[[`, 'nDim'))
if (nDim > 2)
stop(
'Testing does not currently support args with nDim > 2',
call. = FALSE
)
sizes <- if (nDim == 0) 1
else if (length(argTypes) == 1) args[[1]]$size
else if (op %in% nimble:::matrixMultOperators) {
if (!length(argTypes) == 2)
stop(
paste0(
'matrixMultOperators should only have 2 args but got ',
length(argTypes)
), call. = FALSE
)
c(args[[1]]$size[1], args[[2]]$size[2])
} else if (nDim == 2) {
## At least one arg is a matrix, but this is not matrix multiplication,
## so take the sizes from the first arg whose nDim matches.
has_right_nDim <- sapply(args, function(arg) arg$nDim == nDim)
args[has_right_nDim][[1]]$size
} else {
## nDim is 1, so this is either a recycling-rule or a simple vector operator
max(sapply(args, `[[`, 'size'))
}
size_string <- if(all(is.na(sizes)))
''
else if (nDim > 0)
paste0(', c(', paste(sizes, collapse = ', '), ')')
else ''
return(paste0(scalarTypeString, '(', nDim, size_string, ')'))
}
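## Examples (illustrative, not run; the exact strings depend on how nimble's
## operator tables classify each op):
## return_type_string('+', c('double(1, 3)', 'double(0)'))
## ## expected to recycle to the vector arg, e.g. "double(1, c(3))"
## return_type_string('dnorm', c('double(1, 3)', 'double(0)'))
## ## a recycling-rule distribution function, e.g. "double(1, c(3))"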
## Takes an argSymbol and, if any element of argSymbol$size is NA, fills in default sizes.
add_missing_size <- function(argSymbol, vector_size = 3, matrix_size = c(3, 4)) {
if (any(is.na(argSymbol$size))) {
if (argSymbol$nDim == 1)
argSymbol$size <- vector_size
else if (argSymbol$nDim == 2)
argSymbol$size <- matrix_size
}
invisible(argSymbol)
}
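## Example (illustrative, not run):
## sym <- nimble:::argType2symbol(parse(text = 'double(2)')[[1]])
## add_missing_size(sym)$size  ## expected to be the default matrix_size, c(3, 4)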
arg_type_2_input <- function(argType, input_gen_fun = NULL, size = NULL, return_function = FALSE) {
argSymbol <- add_missing_size(
nimble:::argType2symbol(argType)
)
type <- argSymbol$type
nDim <- argSymbol$nDim
if(is.null(size))
size <- argSymbol$size
if (is.null(input_gen_fun))
input_gen_fun <- switch(
type,
"double" = function(arg_size) rnorm(prod(arg_size)),
"integer" = function(arg_size) rgeom(prod(arg_size), 0.5),
"logical" = function(arg_size)
sample(c(TRUE, FALSE), prod(arg_size), replace = TRUE)
)
if(is.character(input_gen_fun)) {
new_fun <- function(size) {}
body(new_fun) <- parse(text = input_gen_fun, keep.source = FALSE)[[1]]
input_gen_fun <- new_fun
}
ans <- function() {
arg <- switch(
nDim + 1,
input_gen_fun(1), ## nDim is 0
input_gen_fun(prod(size)), ## nDim is 1
matrix(input_gen_fun(prod(size)), nrow = size[1], ncol = size[2]), ## nDim is 2
array(input_gen_fun(prod(size)), dim = size) ## nDim is 3
)
if(is.null(arg))
stop('Something went wrong while making test input.', call.=FALSE)
arg
}
if(return_function)
return(ans)
ans()
}
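## Example usage (illustrative, not run):
## arg_type_2_input(quote(double(2)))             ## a 3x4 matrix of rnorm() draws
## arg_type_2_input(quote(integer(1)), size = 5)  ## 5 rgeom() draws
## gen <- arg_type_2_input(quote(double(0)), return_function = TRUE)
## gen()                                          ## a fresh scalar draw on each call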
modify_on_match <- function(x, pattern, key, value, env = parent.frame(), ...) {
## Modify any elements of a named list that match pattern.
##
## @param x A named list of lists.
## @param pattern A regex pattern to compare with `names(x)`.
## @param key The key to modify in any lists whose names match `pattern`.
## @param value The new value for `key`.
## @param env The environment in which to modify `x`.
## @param ... Additional arguments for `grepl`.
for (name in names(x)) {
if (grepl(pattern, name, ...)) {
eval(substitute(x[[name]][[key]] <- value), env)
}
}
}
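## Example (illustrative, not run): relax the tolerance for all cases whose
## names mention 'exp'; `params` is modified in the caller's environment.
## params <- list('dexp double(1)' = list(tol = 1e-8),
##                'dnorm double(1)' = list(tol = 1e-8))
## modify_on_match(params, 'exp', 'tol', 1e-4)
## params[['dexp double(1)']]$tol  ## now 1e-4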
nim_all_equal <- function(x, y, tolerance = .Machine$double.eps^0.5, abs_threshold = 0, xlab = NULL, ylab = NULL, verbose = FALSE, info = "") {
if(is.null(xlab))
xlab <- deparse1(substitute(x))
if(is.null(ylab))
ylab <- deparse1(substitute(y))
denom <- abs(y)
## Use absolute tolerance for sufficiently small values.
## This is necessary for y values exactly zero.
## In some cases (such as derivatives very near zero and affected by floating point errors)
## we also need to use absolute tolerance.
denom[denom <= abs_threshold] <- 1
rel_diff <- abs((x-y)/denom)
result <- rel_diff < tolerance
all_result <- all(result)
if(is.na(all_result)) {
if(verbose) {
wh <- which(is.na(x) | is.na(y))
report <- cbind(x[wh], y[wh])
cat("\n******************\n")
cat("Detected some NA values ", info, ": ", xlab, " ", ylab, ".\n")
print(report)
cat("******************\n")
}
return(FALSE)
}
if(verbose) {
if(!all_result) {
ord <- order(rel_diff, decreasing = TRUE)
wh <- 1:min(length(rel_diff), 5)
report <- cbind(x[ord[wh]], y[ord[wh]], rel_diff[ord[wh]])
report <- report[report[,3] >= tolerance, , drop = FALSE] ## drop = FALSE keeps a single row as a matrix
cat("\n******************\n")
cat("Detected some values out of ", ifelse(abs_threshold == 0, "relative", "absolute or relative"), " tolerance ", info, ": ", xlab, " ", ylab, ".\n")
print(report)
cat("******************\n")
}
}
all_result
}
nim_expect_equal <- function(x, y, tolerance = .Machine$double.eps^0.5, abs_threshold = 0, xlab = NULL, ylab = NULL) {
if(is.null(xlab))
xlab <- deparse1(substitute(x))
if(is.null(ylab))
ylab <- deparse1(substitute(y))
expect_true(nim_all_equal(x, y, xlab = xlab, ylab = ylab, tolerance = tolerance, abs_threshold = abs_threshold, verbose = TRUE))
}
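## Example usage (illustrative, not run): compare two numeric results with a
## relative tolerance, switching to absolute comparison where |y| <= 1e-8.
## cDerivs and rDerivs are hypothetical compiled and R-side results.
## nim_expect_equal(cDerivs$jacobian, rDerivs$jacobian,
##                  tolerance = 1e-6, abs_threshold = 1e-8)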