```{r setup, include = FALSE}
library(SHELF)
knitr::opts_chunk$set(echo = FALSE, warning = FALSE, message = FALSE,
                      fig.pos = 'h',
                      fig.align = 'center',
                      fig.height = 3,
                      fig.width = 4)
```

## Elicited judgements

```{r}
l <- params$fit$limits[, 1]
u <- params$fit$limits[, 2]
expert <- 1
sf <- 3
mydf <- data.frame(params$fit$probs[expert, ], t(params$fit$vals))
colnames(mydf)[1] <- "quantiles"
rownames(mydf) <- NULL
if(params$entry == "Quantiles"){
  subsection <- ""
  knitr::kable(mydf)}
if(params$entry == "Roulette"){
  subsection <- "Implied cumulative probabilities:"
  knitr::kable(params$chips)
}

expertnames <- rownames(params$fit$probs)
```

`r subsection`

```{r}
if(params$entry == "Roulette"){
  mydf <- data.frame(values = params$fit$vals[1, ], 
                     t(params$fit$probs))
  knitr::kable(mydf)
}
```
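
When the Roulette entry mode is used, the cumulative probabilities above are obtained by accumulating each expert's chips across the bins and dividing by their total. A minimal sketch of that conversion (not evaluated here), assuming `params$chips` holds one row per expert and one column per bin:

```{r, eval = FALSE}
# Sketch only: convert roulette chips to implied cumulative probabilities
chips <- params$chips
implied.probs <- t(apply(chips, 1, cumsum)) / rowSums(chips)
implied.probs
```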
```{r}
if(params$fit$limits[expert, 1] == 0){
  x <- paste0("x")
  lower <- "`"}else{
    lower <- paste0("`", params$fit$limits[expert, 1], " + ")}

if(params$fit$limits[expert, 1] > 0){
  x <- paste0("(x-", params$fit$limits[expert, 1], ")")}
if(params$fit$limits[expert, 1] < 0){
  x <- paste0("(x+", abs(params$fit$limits[expert, 1]), ")")}
lx <- min(params$fit$vals[expert, ])
ux <- max(params$fit$vals[expert, ])
```
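
For reference, a fit object with the structure used throughout this report (elements such as `$limits`, `$probs`, `$vals`, `$Normal`, `$Gamma`, and so on) can be produced with `SHELF::fitdist()`. A minimal sketch with made-up judgements for two experts; the quantiles, values and limits below are purely illustrative:

```{r, eval = FALSE}
library(SHELF)

# Illustrative judgements: 0.25, 0.5 and 0.75 quantiles, one column per expert
v <- matrix(c(30, 40, 50,
              25, 35, 55), nrow = 3, ncol = 2)
p <- c(0.25, 0.5, 0.75)

# Fit all candidate distributions on the interval (0, 100)
myfit <- fitdist(vals = v, probs = p, lower = 0, upper = 100)

# myfit plays the role of params$fit in the chunks above
myfit$Normal
plotfit(myfit, d = "normal")
```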

## Distributions

All parameter values are reported to 3 significant figures.

### Normal

```{r}
plotfit(params$fit, d = "normal")
mu <- signif(params$fit$Normal[, 1], sf)
sigsq <- signif(params$fit$Normal[, 2]^2, sf)
```

$$ f_X(x) = \frac{1}{\sqrt{2\pi\sigma^2}} \exp\left(-\frac{1}{2 \sigma^2}(x - \mu)^2\right),\quad -\infty<x<\infty, $$ with

```{r}
normaldf <- data.frame(mu, sigsq)
colnames(normaldf) <- c("$\\mu$", "$\\sigma^2$")
rownames(normaldf) <- expertnames
knitr::kable(normaldf, escape = FALSE)
```

Sample n = 1000 random values (for expert `r expertnames[1]`) with the command

rnorm(n = 1000, mean = `r mu[1]`, sd = sqrt(`r sigsq[1]`))
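
Equivalently, and this applies to each distribution below, the sample can be drawn directly from the fit object, avoiding transcription of the rounded values in the table. A sketch, assuming the fit object is available in the session as `myfit`:

```{r, eval = FALSE}
# Sample for the first expert from the stored (unrounded) normal parameters:
# column 1 holds the mean and column 2 the standard deviation
x <- rnorm(n = 1000,
           mean = myfit$Normal[1, 1],
           sd = myfit$Normal[1, 2])
```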


### Student-$t$

```{r}
m <- signif(params$fit$Student.t[, 1], 3)
s <- signif(params$fit$Student.t[, 2], 3)
tdf <- params$fit$Student.t[, 3]
plotfit(params$fit, d = "t")
```

$$ f_X(x) = \frac{\Gamma((\nu + 1)/2)}{\Gamma(\nu/2)\sigma\sqrt{\nu \pi}} \left(1 + \frac{1}{\nu}\left(\frac{x - \mu}{\sigma}\right)^2\right)^{-(\nu + 1)/2},\quad -\infty<x<\infty, $$ with

```{r}
tdataf <- data.frame(m, s, tdf)
colnames(tdataf) <- c("$\\mu$", "$\\sigma$", "$\\nu$")
rownames(tdataf) <- expertnames
knitr::kable(tdataf, escape = FALSE)
```

Sample n = 1000 random values (for expert `r expertnames[1]`) with the command

`r m[1]` + `r s[1]` * rt(n = 1000, df = `r tdf[1]`)


### Skew normal

```{r}
plotfit(params$fit, d = "skewnormal")
xi <- signif(params$fit$Skewnormal[, 1], sf)
omega <- signif(params$fit$Skewnormal[, 2], sf)
alpha <- signif(params$fit$Skewnormal[, 3], sf)
```

$$f_{X}(x)=\frac{2}{\omega}\phi\left(\frac{x-\xi}{\omega}\right)\Phi\left(\alpha\left(\frac{x-\xi}{\omega}\right)\right),\quad -\infty<x<\infty,$$ where $\phi(.)$ and $\Phi(.)$ are the probability density function and cumulative distribution function respectively of the standard normal distribution, and with

```{r}
skewnormaldf <- data.frame(xi, omega, alpha)
colnames(skewnormaldf) <- c("$\\xi$", "$\\omega$", "$\\alpha$")
rownames(skewnormaldf) <- expertnames
knitr::kable(skewnormaldf, escape = FALSE)
```

Sample n = 1000 random values (for expert `r expertnames[1]`) with the command

sn::rsn(n = 1000, xi = `r xi[1]`, omega = `r omega[1]`, alpha = `r alpha[1]`)
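
Note that the skew normal sampler comes from the `sn` package, which is called here via `sn::rsn()` and so must be installed (it may already be available as a dependency of SHELF); a quick check before running the command above:

```{r, eval = FALSE}
# Install the sn package if it is not already available
if (!requireNamespace("sn", quietly = TRUE)) {
  install.packages("sn")
}
```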


### Log normal

```{r}
plotfit(params$fit, d = "lognormal")
```

$$f_X(x) = \frac{1}{x-L} \times \frac{1}{\sqrt{2\pi\sigma^2}} \exp\left(-\frac{1}{2\sigma^2}(\ln (x-L) - \mu)^2\right), \quad x >L,$$

and $f_X(x)=0$ otherwise, with

```{r}
mu <- signif(params$fit$Log.normal[, 1], 3)
sigsq <- signif(params$fit$Log.normal[, 2]^2, 3)
lnormdf <- data.frame(mu, sigsq, l)
colnames(lnormdf) <- c("$\\mu$", "$\\sigma^2$", "$L$")
rownames(lnormdf) <- expertnames
knitr::kable(lnormdf, escape = FALSE)
```

Sample n = 1000 random values (for expert `r expertnames[1]`) with the command

`r l[1]` + rlnorm(n = 1000, meanlog = `r mu[1]`, sdlog = `r signif(sqrt(sigsq[1]), 3)`)
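
As a quick sanity check on any of the fits, the quantiles of the fitted distribution can be compared with the elicited judgements. A sketch for the shifted log normal and the first expert, assuming the fit object is available as `myfit`:

```{r, eval = FALSE}
# Quantiles of the fitted shifted log normal at the elicited probabilities,
# for comparison with the values the expert originally provided
L <- myfit$limits[1, 1]
q.fitted <- L + qlnorm(myfit$probs[1, ],
                       meanlog = myfit$Log.normal[1, 1],
                       sdlog = myfit$Log.normal[1, 2])
rbind(elicited = myfit$vals[1, ], fitted = q.fitted)
```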


### Gamma

```{r}
plotfit(params$fit, d = "gamma")
```

$$f_X(x) =\frac{\beta ^ {\alpha}}{\Gamma(\alpha)}(x-L)^{\alpha - 1} \exp\left(- \beta (x-L)\right), \quad x >L,$$ and $f_X(x)=0$ otherwise, with

```{r}
shape <- signif(params$fit$Gamma[, 1], 3)
rate <- signif(params$fit$Gamma[, 2], 3)
gammadf <- data.frame(shape, rate, l)
colnames(gammadf) <- c("$\\alpha$", "$\\beta$", "$L$")
rownames(gammadf) <- expertnames
knitr::kable(gammadf, escape = FALSE)
```

Sample n = 1000 random values (for expert `r expertnames[1]`) with the command

`r l[1]` + rgamma(n = 1000, shape = `r shape[1]`, rate = `r rate[1]`)


### Log Student-$t$

```{r}
plotfit(params$fit, d = "logt")
```

$$f_X(x) =\frac{1}{x-L} \times \frac{\Gamma((\nu + 1)/2)}{\Gamma(\nu/2)\times\sigma\times \sqrt{\nu \pi}} \left(1 + \frac{1}{\nu}\left(\frac{\ln (x-L) - \mu}{\sigma}\right)^2\right)^{-(\nu + 1)/2}, \quad x >L,$$ and $f_X(x)=0$ otherwise, with

```{r}
m <- signif(params$fit$Log.Student.t[, 1], 3)
s <- signif(params$fit$Log.Student.t[, 2], 3)
tdf <- params$fit$Log.Student.t[, 3]

logtdf <- data.frame(m, s, tdf, l)
colnames(logtdf) <- c("$\\mu$", "$\\sigma$", "$\\nu$", "$L$")
rownames(logtdf) <- expertnames
knitr::kable(logtdf, escape = FALSE)
```

Sample n = 1000 random values (for expert `r expertnames[1]`) with the command

`r l[1]` + exp(`r m[1]` + `r s[1]` * rt(n = 1000, df = `r tdf[1]`))


### Beta

```{r}
plotfit(params$fit, d = "beta")
```

$$f_X(x) = \frac{1}{U-L}\times\frac{\Gamma(\alpha + \beta)}{\Gamma(\alpha)\Gamma(\beta)} \left(\frac{x-L}{U-L}\right) ^{\alpha - 1} \left(1 - \left(\frac{x-L}{U-L}\right)\right)^{\beta - 1}, \quad L < x <U,$$ and $f_X(x) = 0$ otherwise, with

```{r}
shape1 <- signif(params$fit$Beta[, 1], 3)
shape2 <- signif(params$fit$Beta[, 2], 3)

betadf <- data.frame(shape1, shape2, l, params$fit$limits[, 2])
colnames(betadf) <- c("$\\alpha$", "$\\beta$", "$L$", "$U$")
rownames(betadf) <- expertnames
knitr::kable(betadf, escape = FALSE)
```

Sample n = 1000 random values (for expert `r expertnames[1]`) with the command

`r l[1]` + (`r params$fit$limits[1, 2] - l[1]`) * rbeta(n = 1000, shape1 = `r shape1[1]`, shape2 = `r shape2[1]`)
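
Because the beta distribution is fitted on $(0, 1)$ and then rescaled to the elicitation limits, the sampled values should all lie in $(L, U)$. A sketch, again assuming the fit object is available as `myfit`:

```{r, eval = FALSE}
L <- myfit$limits[1, 1]
U <- myfit$limits[1, 2]
# Sample on (0, 1), then rescale to the elicitation interval (L, U)
x <- L + (U - L) * rbeta(n = 1000,
                         shape1 = myfit$Beta[1, 1],
                         shape2 = myfit$Beta[1, 2])
range(x)  # should lie within (L, U)
```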


### Mirror log normal

```{r}
plotfit(params$fit, d = "mirrorlognormal")
```

$$f_X(x) = \frac{1}{U-x} \times \frac{1}{\sqrt{2\pi\sigma^2}} \exp\left(-\frac{1}{2\sigma^2}(\ln (U-x) - \mu)^2\right), \quad x <U,$$

and $f_X(x)=0$ otherwise, with

```{r}
mu <- signif(params$fit$mirrorlognormal[, 1], 3)
sigsq <- signif(params$fit$mirrorlognormal[, 2]^2, 3)
lnormdf <- data.frame(mu, sigsq, u)
colnames(lnormdf) <- c("$\\mu$", "$\\sigma^2$", "$U$")
rownames(lnormdf) <- expertnames
knitr::kable(lnormdf, escape = FALSE)
```

Sample n = 1000 random values (for expert `r expertnames[1]`) with the command

`r u[1]` - rlnorm(n = 1000, meanlog = `r mu[1]`, sdlog = `r signif(sqrt(sigsq[1]), 3)`)
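
The mirror distributions reflect a right-skewed distribution about the upper limit $U$, giving a left-skewed distribution bounded above by $U$; the same construction applies to the mirror gamma and mirror log Student-$t$ below. A sketch, assuming the fit object is available as `myfit`:

```{r, eval = FALSE}
U <- myfit$limits[1, 2]
# Mirror log normal draws: U minus a log normal sample, so every value
# lies below U and the long tail points to the left
x <- U - rlnorm(n = 1000,
                meanlog = myfit$mirrorlognormal[1, 1],
                sdlog = myfit$mirrorlognormal[1, 2])
max(x)  # should not exceed U
```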


### Mirror gamma

```{r}
plotfit(params$fit, d = "mirrorgamma")
```

$$f_X(x) =\frac{\beta ^ {\alpha}}{\Gamma(\alpha)}(U-x)^{\alpha - 1} \exp\left(- \beta (U-x)\right), \quad x <U,$$ and $f_X(x)=0$ otherwise, with

```{r}
shape <- signif(params$fit$mirrorgamma[, 1], 3)
rate <- signif(params$fit$mirrorgamma[, 2], 3)
gammadf <- data.frame(shape, rate, u)
colnames(gammadf) <- c("$\\alpha$", "$\\beta$", "$U$")
rownames(gammadf) <- expertnames
knitr::kable(gammadf, escape = FALSE)
```

Sample n = 1000 random values (for expert `r expertnames[1]`) with the command

`r u[1]` - rgamma(n = 1000, shape = `r shape[1]`, rate = `r rate[1]`)


### Mirror log Student-$t$

```{r}
plotfit(params$fit, d = "mirrorlogt")
```

$$f_X(x) =\frac{1}{U-x} \times \frac{\Gamma((\nu + 1)/2)}{\Gamma(\nu/2)\times\sigma\times \sqrt{\nu \pi}} \left(1 + \frac{1}{\nu}\left(\frac{\ln (U-x) - \mu}{\sigma}\right)^2\right)^{-(\nu + 1)/2}, \quad x <U,$$ and $f_X(x)=0$ otherwise, with

```{r}
m <- signif(params$fit$mirrorlogt[, 1], 3)
s <- signif(params$fit$mirrorlogt[, 2], 3)
tdf <- params$fit$mirrorlogt[, 3]

logtdf <- data.frame(m, s, tdf, u)
colnames(logtdf) <- c("$\\mu$", "$\\sigma$", "$\\nu$", "$U$")
rownames(logtdf) <- expertnames
knitr::kable(logtdf, escape = FALSE)
```

Sample n = 1000 random values (for expert `r expertnames[1]`) with the command

`r u[1]` - exp(`r m[1]` + `r s[1]` * rt(n = 1000, df = `r tdf[1]`))



