# Load the dichotomous-models training sub-UI. The sourced file is expected to
# define the `DIRT` object, which is inserted into the "Training" section of
# the navbarMenu below.
source("ui/IRT/DIRT.R")
IRT <- navbarMenu("IRT models",
"Dichotomous models",
# * RASCH ####
# Rasch model tab: description, equation, ICC/IIC/TIF plots, parameter table,
# factor-score scatter plot, Wright map, and a copy-paste R code sample.
tabPanel("Rasch",
h3("Rasch model"),
p('Item Response Theory (IRT) models are mixed-effect regression models in which
respondent ability (theta) is assumed to be a random effect and is estimated together with item
parameters. Ability (theta) is often assumed to follow normal distribution.'),
p('In',
strong('Rasch model'), '(Rasch, 1960), all items are assumed to have the same slope in inflection point, i.e., the
same discrimination parameter', strong('a'), 'which is fixed to value of 1. Items may differ in location of their inflection point, i.e. they may differ in difficulty parameter',
strong('b.')),
h4("Equation"),
# MathJax source for the Rasch item characteristic function
('$$\\mathrm{P}\\left(Y_{ij} = 1\\vert \\theta_{i}, b_{j} \\right) = \\frac{e^{\\left(\\theta_{i}-b_{j}\\right) }}{1+e^{\\left(\\theta_{i}-b_{j}\\right) }} $$'),
h4("Item characteristic curves"),
plotOutput('rasch_mirt'),
downloadButton("DP_rasch_mirt", label = "Download figure"),
h4("Item information curves"),
plotOutput('raschiic_mirt'),
downloadButton("DP_raschiic_mirt", label = "Download figure"),
h4("Test information function"),
plotOutput('raschtif_mirt'),
downloadButton("DP_raschtif_mirt", label = "Download figure"),
h4("Table of parameters with item fit statistics"),
p('Estimates of parameters are completed by SX2 item fit statistics (Ames & Penfield, 2015).
SX2 is computed only when no missing data are present. In such a case consider using imputed dataset!'),
tableOutput('raschcoef_mirt'),
h4('Scatter plot of factor scores and standardized total scores'),
textOutput('raschFactorCor_mirt'),
plotOutput('raschFactor_mirt'),
downloadButton("DP_raschFactor_mirt", label = "Download figure"),
h4('Wright map'),
p('Wright map (Wilson, 2005; Wright & Stone, 1979), also called item-person map, is a graphical tool
to display person ability estimates and item parameters. The person side
(left) represents histogram of estimated abilities of respondents.
The item side (right) displays estimates of difficulty parameters of individual items. '),
plotOutput('raschWrightMap_mirt'),
downloadButton('DP_raschWM_mirt', label = "Download figure"),
br(),
h4("Selected R code"),
# sample code shown to the user (rendered as a single <code> block)
div(code(HTML("library(difNLR)<br>library(mirt) <br>library(ShinyItemAnalysis)<br><br># loading data<br>data(GMAT) <br>data <- GMAT[, 1:20] <br><br># fitting Rasch model<br>fit <- mirt(data, model = 1, itemtype = 'Rasch', SE = T) <br><br># Item Characteristic Curves <br>plot(fit, type = 'trace', facet_items = F) <br># Item Information Curves <br>plot(fit, type = 'infotrace', facet_items = F) <br># Test Information Function <br>plot(fit, type = 'infoSE') <br><br># Coefficients <br>coef(fit, simplify = TRUE) <br>coef(fit, IRTpars = TRUE, simplify = TRUE) <br><br># Item fit statistics <br>itemfit(fit) <br><br># Factor scores vs Standardized total scores <br>fs <- as.vector(fscores(fit)) <br>sts <- as.vector(scale(apply(data, 1, sum))) <br>plot(fs ~ sts) <br><br># Wright Map <br>b <- sapply(1:ncol(data), function(i) coef(fit)[[i]][, 'd']) <br>ggWrightMap(fs, b)"))),
br()
),
# * 1PL ####
# 1PL model tab: description, equation, ICC/IIC/TIF plots, parameter table,
# factor-score scatter plot, Wright map, and a copy-paste R code sample
# (mirt parameterization, with an ltm alternative).
tabPanel("1PL",
h3("One parameter Item Response Theory model"),
p('Item Response Theory (IRT) models are mixed-effect regression models in which
respondent ability (theta) is assumed to be a random effect and is estimated together with item
parameters. Ability (theta) is often assumed to follow normal distribution.'),
p('In', strong('1PL IRT model,'), 'all items are assumed to have the same slope in inflection point, i.e., the
same discrimination', strong('a.'), ' Items can differ in location of their inflection point, i.e., in item difficulty parameters',
strong('b.')),
h4("Equation"),
# MathJax source for the 1PL item characteristic function (common a)
('$$\\mathrm{P}\\left(Y_{ij} = 1\\vert \\theta_{i}, a, b_{j} \\right) = \\frac{e^{a\\left(\\theta_{i}-b_{j}\\right) }}{1+e^{a\\left(\\theta_{i}-b_{j}\\right) }} $$'),
h4("Item characteristic curves"),
plotOutput('oneparamirt_mirt'),
downloadButton("DP_oneparamirt_mirt", label = "Download figure"),
h4("Item information curves"),
plotOutput('oneparamirtiic_mirt'),
downloadButton("DP_oneparamirtiic_mirt", label = "Download figure"),
h4("Test information function"),
plotOutput('oneparamirttif_mirt'),
downloadButton("DP_oneparamirttif_mirt", label = "Download figure"),
h4("Table of parameters with item fit statistics"),
p('Estimates of parameters are completed by SX2 item fit statistics
(Ames & Penfield, 2015). SX2 is computed only when no missing data are present.
In such a case consider using imputed dataset!'),
tableOutput('oneparamirtcoef_mirt'),
h4('Scatter plot of factor scores and standardized total scores'),
textOutput('oneparamirtFactorCor_mirt'),
plotOutput('oneparamirtFactor_mirt'),
downloadButton("DP_oneparamirtFactor_mirt", label = "Download figure"),
h4('Wright map'),
p('Wright map (Wilson, 2005; Wright & Stone, 1979), also called item-person map, is a graphical tool
to display person ability estimates and item parameters. The person side
(left) represents histogram of estimated abilities of respondents.
The item side (right) displays estimates of difficulty parameters of individual items. '),
plotOutput('oneparamirtWrightMap_mirt'),
downloadButton('DP_oneparamirtWM_mirt', label = "Download figure"),
br(),
h4("Selected R code"),
# sample code shown to the user (rendered as a single <code> block)
div(code(HTML("library(difNLR)<br>library(mirt) <br>library(ShinyItemAnalysis)<br><br># loading data<br>data(GMAT) <br>data <- GMAT[, 1:20] <br><br># fitting 1PL model<br>fit <- mirt(data, model = 1, itemtype = '2PL', constrain = list((1:ncol(data)) + seq(0, (ncol(data) - 1)*3, 3)), SE = T) <br><br># Item Characteristic Curves <br>plot(fit, type = 'trace', facet_items = F) <br># Item Information Curves <br>plot(fit, type = 'infotrace', facet_items = F) <br># Test Information Function <br>plot(fit, type = 'infoSE') <br><br># Coefficients <br>coef(fit, simplify = TRUE) <br>coef(fit, IRTpars = TRUE, simplify = TRUE) <br><br># Item fit statistics <br>itemfit(fit) <br><br># Factor scores vs Standardized total scores <br>fs <- as.vector(fscores(fit)) <br>sts <- as.vector(scale(apply(data, 1, sum))) <br>plot(fs ~ sts) <br><br># Wright Map <br>b <- sapply(1:ncol(data), function(i) coef(fit)[[i]][, 'd']) <br>ggWrightMap(fs, b)<br><br><br><br># You can also use ltm library for IRT models <br># fitting 1PL model<br>fit <- rasch(data) <br># for Rasch model use <br># fit <- rasch(data, constraint = cbind(ncol(data) + 1, 1)) <br><br># Item Characteristic Curves <br>plot(fit) <br># Item Information Curves <br>plot(fit, type = 'IIC') <br># Test Information Function <br>plot(fit, items = 0, type = 'IIC') <br><br># Coefficients <br>coef(fit) <br><br># Factor scores vs Standardized total scores <br>df1 <- ltm::factor.scores(fit, return.MIvalues = T)$score.dat <br>FS <- as.vector(df1[, 'z1']) <br>df2 <- df1 <br>df2$Obs <- df2$Exp <- df2$z1 <- df2$se.z1 <- NULL <br>STS <- as.vector(scale(apply(df2, 1, sum))) <br>df <- data.frame(FS, STS) <br>plot(FS ~ STS, data = df, xlab = 'Standardized total score', ylab = 'Factor score')"))),
br()
),
# * 2PL ####
# 2PL model tab: description, equation, ICC/IIC/TIF plots, parameter table,
# factor-score scatter plot, and a copy-paste R code sample (mirt and ltm).
# NOTE(review): the trailing space in the tab title ("2PL ") is kept — the
# title doubles as the tab value and may be matched elsewhere; confirm before
# trimming.
tabPanel("2PL ",
h3("Two parameter Item Response Theory model"),
p('Item Response Theory (IRT) models are mixed-effect regression models in which
respondent ability (theta) is assumed to be a random effect and is estimated together with item
parameters. Ability (theta) is often assumed to follow normal distribution.'),
p(strong('2PL IRT model'), ' allows for different slopes in inflection point, i.e., different
discrimination parameters', strong('a.'), 'Items can also differ in location of their inflection point, i.e., in item difficulty parameters',
strong('b.')),
h4("Equation"),
# MathJax source for the 2PL item characteristic function
('$$\\mathrm{P}\\left(Y_{ij} = 1\\vert \\theta_{i}, a_{j}, b_{j}\\right) = \\frac{e^{a_{j}\\left(\\theta_{i}-b_{j}\\right) }}{1+e^{a_{j}\\left(\\theta_{i}-b_{j}\\right) }} $$'),
h4("Item characteristic curves"),
plotOutput('twoparamirt_mirt'),
downloadButton("DP_twoparamirt_mirt", label = "Download figure"),
h4("Item information curves"),
plotOutput('twoparamirtiic_mirt'),
downloadButton("DP_twoparamirtiic_mirt", label = "Download figure"),
h4("Test information function"),
plotOutput('twoparamirttif_mirt'),
downloadButton("DP_twoparamirttif_mirt", label = "Download figure"),
h4("Table of parameters with item fit statistics"),
p('Estimates of parameters are completed by SX2 item fit statistics (Ames & Penfield, 2015).
SX2 is computed only when no missing data are present. In such a case consider using imputed dataset!'),
tableOutput('twoparamirtcoef_mirt'),
h4('Scatter plot of factor scores and standardized total scores'),
textOutput('twoparamirtFactorCor_mirt'),
plotOutput('twoparamirtFactor_mirt'),
downloadButton("DP_twoparamirtFactor_mirt", label = "Download figure"),
br(),
h4("Selected R code"),
# sample code shown to the user, one code()/br() pair per displayed line
div(code('library(difNLR)'),
br(),
code('library(mirt)'),
br(),
code('data(GMAT)'),
br(),
code('data <- GMAT[, 1:20]'),
br(),
br(),
code('# Model'),
br(),
code('fit <- mirt(data, model = 1, itemtype = "2PL", SE = T)'),
br(),
code('# Item Characteristic Curves'),
br(),
code('plot(fit, type = "trace", facet_items = F)'),
br(),
code('# Item Information Curves'),
br(),
code('plot(fit, type = "infotrace", facet_items = F)'),
br(),
code('# Test Information Function'),
br(),
code('plot(fit, type = "infoSE")'),
br(),
code('# Coefficients'),
br(),
code('coef(fit, simplify = TRUE)'),
br(),
code('coef(fit, IRTpars = TRUE, simplify = TRUE)'),
br(),
code('# Item fit statistics'),
br(),
code('itemfit(fit)'),
br(),
code('# Factor scores vs Standardized total scores'),
br(),
code('fs <- as.vector(fscores(fit))'),
br(),
code('sts <- as.vector(scale(apply(data, 1, sum)))'),
br(),
code('plot(fs ~ sts)'),
br(),
br(),
br(),
code('# You can also use ltm library for IRT models'),
br(),
code('library(difNLR)'),
br(),
code('library(ltm)'),
br(),
code('data(GMAT)'),
br(),
code('data <- GMAT[, 1:20]'),
br(),
br(),
code('# Model'),
br(),
code('fit <- ltm(data ~ z1, IRT.param = TRUE)'),
br(),
code('# Item Characteristic Curves'),
br(),
code('plot(fit)'),
br(),
code('# Item Information Curves'),
br(),
code('plot(fit, type = "IIC")'),
br(),
code('# Test Information Function'),
br(),
code('plot(fit, items = 0, type = "IIC")'),
br(),
code('# Coefficients'),
br(),
code('coef(fit)'),
br(),
code('# Factor scores vs Standardized total scores'),
br(),
code('df1 <- ltm::factor.scores(fit, return.MIvalues = T)$score.dat'),
br(),
code('FS <- as.vector(df1[, "z1"])'),
br(),
code('df2 <- df1'),
br(),
code('df2$Obs <- df2$Exp <- df2$z1 <- df2$se.z1 <- NULL'),
br(),
code('STS <- as.vector(scale(apply(df2, 1, sum)))'),
br(),
code('df <- data.frame(FS, STS)'),
br(),
code('plot(FS ~ STS, data = df,
xlab = "Standardized total score",
ylab = "Factor score")')),
br()
),
# * 3PL ####
# 3PL model tab: description, equation, convergence notice, ICC/IIC/TIF plots,
# parameter table, factor-score scatter plot, and a copy-paste R code sample
# (mirt and ltm).
# NOTE(review): the trailing space in the tab title ("3PL ") is kept — the
# title doubles as the tab value and may be matched elsewhere; confirm before
# trimming.
tabPanel("3PL ",
h3("Three parameter Item Response Theory model"),
p('Item Response Theory (IRT) models are mixed-effect regression models in which
respondent ability (theta) is assumed to be a random effect and is estimated together with item
parameters. Ability (theta) is often assumed to follow normal distribution.'),
p(strong('3PL IRT model'), ' allows for different discriminations of items', strong('a,'),
'different item difficulties',
strong('b,'), 'and allows also for nonzero left asymptote, pseudo-guessing', strong('c.')),
h4("Equation"),
# MathJax source for the 3PL item characteristic function
('$$\\mathrm{P}\\left(Y_{ij} = 1\\vert \\theta_{i}, a_{j}, b_{j}, c_{j} \\right) = c_{j} + \\left(1 - c_{j}\\right) \\cdot \\frac{e^{a_{j}\\left(\\theta_{i}-b_{j}\\right) }}{1+e^{a_{j}\\left(\\theta_{i}-b_{j}\\right) }} $$'),
uiOutput("irt_3PL_model_converged"),
h4("Item characteristic curves"),
plotOutput('threeparamirt_mirt'),
downloadButton("DP_threeparamirt_mirt", label = "Download figure"),
h4("Item information curves"),
plotOutput('threeparamirtiic_mirt'),
downloadButton("DP_threeparamirtiic_mirt", label = "Download figure"),
h4("Test information function"),
plotOutput('threeparamirttif_mirt'),
downloadButton("DP_threeparamirttif_mirt", label = "Download figure"),
h4("Table of parameters with item fit statistics"),
p('Estimates of parameters are completed by SX2 item fit statistics (Ames & Penfield, 2015).
SX2 is computed only when no missing data are present. In such a case consider using imputed dataset!'),
tableOutput('threeparamirtcoef_mirt'),
h4('Scatter plot of factor scores and standardized total scores'),
textOutput('threeparamirtFactorCor_mirt'),
plotOutput('threeparamirtFactor_mirt'),
downloadButton("DP_threeparamirtFactor_mirt", label = "Download figure"),
br(),
h4("Selected R code"),
# sample code shown to the user, one code()/br() pair per displayed line
div(code('library(difNLR)'),
br(),
code('library(mirt)'),
br(),
code('data(GMAT)'),
br(),
code('data <- GMAT[, 1:20]'),
br(),
br(),
code('# Model'),
br(),
code('fit <- mirt(data, model = 1, itemtype = "3PL", SE = T)'),
br(),
code('# Item Characteristic Curves'),
br(),
code('plot(fit, type = "trace", facet_items = F)'),
br(),
code('# Item Information Curves'),
br(),
code('plot(fit, type = "infotrace", facet_items = F)'),
br(),
code('# Test Information Function'),
br(),
code('plot(fit, type = "infoSE")'),
br(),
code('# Coefficients'),
br(),
code('coef(fit, simplify = TRUE)'),
br(),
code('coef(fit, IRTpars = TRUE, simplify = TRUE)'),
br(),
code('# Item fit statistics'),
br(),
code('itemfit(fit)'),
br(),
code('# Factor scores vs Standardized total scores'),
br(),
code('fs <- as.vector(fscores(fit))'),
br(),
code('sts <- as.vector(scale(apply(data, 1, sum)))'),
br(),
code('plot(fs ~ sts)'),
br(),
# blank lines separate the mirt sample from the ltm alternative
# (heading moved after the separators, matching the 2PL panel layout)
br(),
br(),
code('# You can also use ltm library for IRT models'),
br(),
code('library(difNLR)'),
br(),
code('library(ltm)'),
br(),
code('data(GMAT)'),
br(),
code('data <- GMAT[, 1:20]'),
br(),
br(),
code('# Model'),
br(),
code('fit <- tpm(data, IRT.param = TRUE)'),
br(),
code('# Item Characteristic Curves'),
br(),
code('plot(fit)'),
br(),
code('# Item Information Curves'),
br(),
code('plot(fit, type = "IIC")'),
br(),
code('# Test Information Function'),
br(),
code('plot(fit, items = 0, type = "IIC")'),
br(),
code('# Coefficients'),
br(),
code('coef(fit)'),
br(),
code('# Factor scores vs Standardized total scores'),
br(),
code('df1 <- ltm::factor.scores(fit, return.MIvalues = T)$score.dat'),
br(),
code('FS <- as.vector(df1[, "z1"])'),
br(),
code('df2 <- df1'),
br(),
code('df2$Obs <- df2$Exp <- df2$z1 <- df2$se.z1 <- NULL'),
br(),
code('STS <- as.vector(scale(apply(df2, 1, sum)))'),
br(),
code('df <- data.frame(FS, STS)'),
br(),
code('plot(FS ~ STS, data = df,
xlab = "Standardized total score",
ylab = "Factor score")')),
br()),
# * 4PL ####
# 4PL model tab: description, equation, convergence notice, ICC/IIC/TIF plots,
# parameter table, factor-score scatter plot, and a copy-paste R code sample.
# NOTE(review): the trailing space in the tab title ("4PL ") is kept — the
# title doubles as the tab value and may be matched elsewhere; confirm before
# trimming.
tabPanel("4PL ",
h3("Four parameter Item Response Theory model"),
p('Item Response Theory (IRT) models are mixed-effect regression models in which
respondent ability (theta) is assumed to be a random effect and is estimated together with item
parameters. Ability (theta) is often assumed to follow normal distribution.'),
p(strong('4PL IRT model'), ' allows for different discriminations of items', strong('a,'),
'different item difficulties', strong('b,'), 'nonzero left asymptote, i.e. pseudo-guessing parameter', strong('c,'),
'and also for upper asymptote lower than one, i.e., inattention parameter', strong('d.')),
h4("Equation"),
# MathJax source for the 4PL item characteristic function
('$$\\mathrm{P}\\left(Y_{ij} = 1\\vert \\theta_{i}, a_{j}, b_{j}, c_{j}, d_{j} \\right) = c_{j} + \\left(d_{j} - c_{j}\\right) \\cdot \\frac{e^{a_{j}\\left(\\theta_{i}-b_{j}\\right) }}{1+e^{a_{j}\\left(\\theta_{i}-b_{j}\\right) }} $$'),
uiOutput("irt_4PL_model_converged"),
h4("Item characteristic curves"),
plotOutput('irt_4PL_icc'),
downloadButton("DB_irt_4PL_icc", label = "Download figure"),
h4("Item information curves"),
plotOutput('irt_4PL_iic'),
downloadButton("DB_irt_4PL_iic", label = "Download figure"),
h4("Test information function"),
plotOutput('irt_4PL_tif'),
downloadButton("DB_irt_4PL_tif", label = "Download figure"),
h4("Table of parameters with item fit statistics"),
p('Estimates of parameters are completed by SX2 item fit statistics (Ames & Penfield, 2015).
SX2 is computed only when no missing data are present. In such a case consider using imputed dataset!'),
tableOutput('irt_4PL_coef'),
h4('Scatter plot of factor scores and standardized total scores'),
textOutput('irt_4PL_factorscores_correlation'),
plotOutput('irt_4PL_factorscores_plot'),
downloadButton("DB_irt_4PL_factorscores_plot", label = "Download figure"),
br(),
h4("Selected R code"),
# sample code shown to the user, one code()/br() pair per displayed line
div(code('library(difNLR)'),
br(),
code('library(mirt)'),
br(),
code('data(GMAT)'),
br(),
code('data <- GMAT[, 1:20]'),
br(),
br(),
code('# Model'),
br(),
code('fit <- mirt(data, model = 1, itemtype = "4PL", SE = T)'),
br(),
code('# Item Characteristic Curves'),
br(),
code('plot(fit, type = "trace", facet_items = F)'),
br(),
code('# Item Information Curves'),
br(),
code('plot(fit, type = "infotrace", facet_items = F)'),
br(),
code('# Test Information Function'),
br(),
code('plot(fit, type = "infoSE")'),
br(),
code('# Coefficients'),
br(),
code('coef(fit, simplify = TRUE)'),
br(),
code('coef(fit, IRTpars = TRUE, simplify = TRUE)'),
br(),
code('# Item fit statistics'),
br(),
code('itemfit(fit)'),
br(),
code('# Factor scores vs Standardized total scores'),
br(),
code('fs <- as.vector(fscores(fit))'),
br(),
code('sts <- as.vector(scale(apply(data, 1, sum)))'),
br(),
code('plot(fs ~ sts)')),
br()),
# * MODEL COMPARISON ####
# Model-comparison tab: information criteria, likelihood ratio tests,
# comparison table, and a copy-paste R code sample.
# NOTE(review): the trailing space in the tab title ("Model comparison ") is
# kept — the title doubles as the tab value and may be matched elsewhere;
# confirm before trimming.
tabPanel("Model comparison ",
h3("Item Response Theory model selection"),
p('Item Response Theory (IRT) models are mixed-effect regression models in which
respondent ability (theta) is assumed to be a random effect and is estimated together with item
parameters. Ability (theta) is often assumed to follow normal distribution.'),
p('IRT models can be compared by several information criteria: '),
tags$ul(
tags$li(strong('AIC'), 'is the Akaike information criterion (Akaike, 1974), '),
tags$li(strong('AICc'), 'is AIC with a correction for finite sample size, '),
tags$li(strong('BIC'), 'is the Bayesian information criterion (Schwarz, 1978), '),
tags$li(strong('SABIC'), 'is the sample-size adjusted BIC criterion.')
),
p('Another approach to compare IRT models can be likelihood ratio chi-squared test.
Significance level is set to 0.05.'),
h4("Table of comparison statistics"),
p('Row ', strong('BEST'), 'indicates which model has the lowest value of criterion, or is the largest
significant model by likelihood ratio test.'),
tableOutput('irtcomparison'),
# bold the last ("BEST") row of the comparison table
tags$style(type = "text/css", "#irtcomparison tr:last-child {font-weight:bold;}"),
br(),
h4("Selected R code"),
# sample code shown to the user (rendered as a single <code> block)
div(code(HTML("library(difNLR) <br>library(mirt)<br><br># loading data<br>data(GMAT) <br>data <- GMAT[, 1:20] <br><br># 1PL IRT model <br>s <- paste(\"F = 1-\", ncol(data), \"\\n\",<br> \"CONSTRAIN = (1-\", ncol(data), \", a1)\")<br>model <- mirt.model(s)<br>fit1PL <- mirt(data, model = model, itemtype = \"2PL\")<br># 2PL IRT model <br>fit2PL <- mirt(data, model = 1, itemtype = \"2PL\") <br># 3PL IRT model <br>fit3PL <- mirt(data, model = 1, itemtype = \"3PL\") <br># 4PL IRT model <br>fit4PL <- mirt(data, model = 1, itemtype = \"4PL\") <br><br># comparison <br>anova(fit1PL, fit2PL) <br>anova(fit2PL, fit3PL) <br>anova(fit3PL, fit4PL)"))),
br()
),
"----",
"Polytomous models",
# * BOCK'S NOMINAL MODEL ####
# Bock's nominal response model tab: description, equation, ICC/IIC/TIF plots,
# parameter table, factor-score scatter plot, and a copy-paste R code sample.
tabPanel("Bock's nominal model",
h3("Bock's nominal Item Response Theory model"),
p('The nominal response model (NRM) was introduced by Bock (1972) as a way to model
responses to items with two or more nominal categories. This model is suitable for
multiple-choice items with no particular ordering of distractors.
It is also a generalization of some models for ordinal data, e.g. generalized partial credit model (GPCM)
or its restricted versions partial credit model (PCM) and rating scale model (RSM).'),
h4('Equation'),
withMathJax('For ', strong('K'), ' possible test choices, the probability of the choice ', strong('k'), ' for
person ', strong('i'), ' with latent trait', strong('\\(\\theta\\)'), ' in item ', strong('j'),
'is given by the following equation: '),
# MathJax source for the NRM category probability
('$$\\mathrm{P}(Y_{ij} = k|\\theta_i, a_{j1}, al_{j(l-1)}, d_{j(l-1)}, l = 1, \\dots, K) =
\\frac{e^{(ak_{j(k-1)} * a_{j1} * \\theta_i + d_{j(k-1)})}}{\\sum_l e^{(al_{j(l-1)} * a_{j1} * \\theta_i + d_{j(l-1)})}}$$'),
br(),
h4("Item characteristic curves"),
plotOutput('bock_CC'),
downloadButton("DP_bock_CC", label = "Download figure"),
h4("Item information curves"),
plotOutput('bock_IIC'),
downloadButton("DP_bock_IIC", label = "Download figure"),
h4("Test information function"),
plotOutput('bock_TIF'),
downloadButton("DP_bock_TIF", label = "Download figure"),
h4("Table of parameters"),
textOutput("bock_coef_warning"),
tableOutput('bock_coef'),
h4('Scatter plot of factor scores and standardized total scores'),
textOutput('bockFactorCorInput_mirt'),
plotOutput('bock_factor'),
downloadButton("DP_bock_factor", label = "Download figure"),
br(),
h4("Selected R code"),
# sample code shown to the user, one code()/br() pair per displayed line
div(code('library(difNLR)'),
br(),
code('library(mirt)'),
br(),
code('data(GMAT)'),
br(),
code('data <- GMAT[, 1:20]'),
br(),
br(),
code('# Model'),
br(),
code('fit <- mirt(data, model = 1, itemtype = "nominal")'),
br(),
code('# Item Characteristic Curves'),
br(),
code('plot(fit, type = "trace", facet_items = F)'),
br(),
code('# Item Information Curves'),
br(),
code('plot(fit, type = "infotrace", facet_items = F)'),
br(),
code('# Test Information Function'),
br(),
code('plot(fit, type = "infoSE")'),
br(),
code('# Coefficients'),
br(),
code('coef(fit, simplify = TRUE)'),
br(),
code('coef(fit, IRTpars = TRUE, simplify = TRUE)'),
br(),
code('# Factor scores vs Standardized total scores'),
br(),
code('fs <- as.vector(fscores(fit))'),
br(),
code('sts <- as.vector(scale(apply(data, 1, sum)))'),
br(),
code('plot(fs ~ sts)')),
br()
),
"----",
"Training",
# * TRAINING ####
# ** Dichotomous models ####
DIRT,
# ** Polytomous models ####
tabPanel("Polytomous models",
tabsetPanel(
# *** Intro ####
# *** Intro ####
# Training > Polytomous models > Intro: static overview text on difference
# models vs divide-by-total models (GRM, GPCM/PCM/RSM, NRM).
tabPanel('Intro',
h3("Polytomous models"),
p('Polytomous models are used when partial score is possible, or when items are graded
on Likert scale (e.g. from Totally disagree to Totally agree); some polytomous
models can also be used when analyzing multiple-choice items. In this section you
can explore item response functions of some polytomous models.'),
br(),
p('Two main classes of polytomous IRT models are considered:'),
p(strong('Difference models'), 'are defined by setting mathematical form to cumulative
probabilities, while category probabilities are calculated as their difference.
These models are also sometimes called', strong('cumulative logit models'), 'as they
set linear form to cumulative logits.'),
p('As an example, ', strong('Graded Response Model'), '(GRM; Samejima, 1970) uses 2PL
IRT model to describe cumulative probabilities (probabilities to obtain score higher
than 1, 2, 3, etc.). Category probabilities are then described as differences of two
subsequent cumulative probabilities. '), br(),
p('For', strong('divide-by-total models'), 'response category probabilities are defined
as the ratio between category-related functions and their sum. '),
p('In', strong('Generalized Partial Credit Model'), '(GPCM; Muraki, 1992), probability
of the successful transition from one category score to the next category score is
modelled by 2PL IRT model, while ', strong('Partial Credit Model'), '(PCM; Masters, 1982)
uses 1PL IRT model to describe this probability. An even more restricted version, the',
strong('Rating Scale Model'), '(RSM; Andrich, 1978) assumes exactly the same K response
categories for each item and threshold parameters which can be split into a response-threshold
parameter and an item-specific location parameter. These models are also sometimes called
', strong('adjacent-category logit models'), 'as they set linear form to adjacent logits.'),
p('To model distractor properties in multiple-choice items,', strong('Nominal Response Model'),
'(NRM; Bock, 1972) can be used. NRM is an IRT analogy of multinomial regression model. This
model is also a generalization of GPCM/PCM/RSM ordinal models. NRM is also sometimes called
', strong('baseline-category logit model'), 'as it sets linear form to log of odds of selecting given category
to selecting a baseline category. Baseline can be chosen arbitrarily, although usually the correct
answer or the first answer is chosen.')
),
# *** Graded response model ####
# *** Graded response model ####
# Training > Polytomous models > GRM: interactive demo with a, b_k sliders,
# cumulative/category/expected-score plots, and a copy-paste R code sample.
tabPanel('Graded response model',
h3("Graded response model"),
p("Graded response model (GRM; Samejima, 1970) uses 2PL IRT model to describe cumulative probabilities
(probabilities to obtain score higher than 1, 2, 3, etc.). Category probabilities are then described
as differences of two subsequent cumulative probabilities. "),
p("It belongs to class of difference models, which are defined by setting mathematical form to cumulative
probabilities, while category probabilities are calculated as their difference. These models are also
sometimes called cumulative logit models, as they set linear form to cumulative logits."),
h4("Parameters"),
p("Select number of responses and difficulty for cumulative probabilities", strong("b"), "and common
discrimination parameter", strong("a"), ". Cumulative probability \\(P(Y \\geq 0)\\) is always equal to 1
and it is not displayed, corresponding category probability \\(P(Y = 0)\\) is displayed with black color."),
# highest attainable item score (number of categories minus one)
div(style = "display: inline-block; vertical-align: middle; width: 18%;",
numericInput(inputId = "irt_training_grm_numresp",
label = "Highest score",
value = 4,
min = 2,
max = 6)),
br(),
# common discrimination parameter a
div(style = "display: inline-block; vertical-align: middle; width: 18%;",
sliderInput(inputId = "irt_training_grm_a",
label = "a - discrimination",
value = 1,
min = 0,
max = 4,
step = 0.01)),
br(),
# b_k sliders are generated server-side to match the chosen highest score
uiOutput("irt_training_grm_sliders"),
br(),
h4("Equations"),
# MathJax: cumulative probability, category probability, expected item score
('$$\\pi_k^* = \\mathrm{P}\\left(Y \\geq k \\vert \\theta, a, b_k\\right) = \\frac{e^{a\\left(\\theta-b_k\\right) }}{1+e^{a\\left(\\theta-b_k\\right) }} $$'),
('$$\\pi_k =\\mathrm{P}\\left(Y = k \\vert \\theta, a, b_k, b_{k+1}\\right) = \\pi_k^* - \\pi_{k+1}^* $$'),
('$$\\mathrm{E}\\left(Y \\vert \\theta, a, b_1, \\dots, b_K\\right) = \\sum_{k = 0}^K k\\pi_k$$'),
h4("Plots"),
splitLayout(cellWidths = c("33%", "33%", "33%"),
plotlyOutput('irt_training_grm_plot_cummulative'),
plotlyOutput('irt_training_grm_plot_category'),
plotlyOutput('irt_training_grm_plot_expected')),
splitLayout(cellWidths = c("33%", "33%", "33%"),
downloadButton("DB_irt_training_grm_plot_cummulative", label = "Download figure"),
downloadButton("DB_irt_training_grm_plot_category", label = "Download figure"),
downloadButton("DB_irt_training_grm_plot_expected", label = "Download figure")),
h4("Selected R code"),
# sample code shown to the user (rendered as a single <code> block)
div(code(HTML("library(ggplot2) <br>library(data.table) <br><br># setting parameters <br>a <- 1 <br>b <- c(-1.5, -1, -0.5, 0) <br>theta <- seq(-4, 4, 0.01) <br><br># calculating cumulative probabilities <br>ccirt <- function(theta, a, b){ return(1/(1 + exp(-a*(theta - b)))) } <br>df1 <- data.frame(sapply(1:length(b), function(i) ccirt(theta, a, b[i])) , theta)<br>df1 <- melt(df1, id.vars = \"theta\") <br><br># plotting cumulative probabilities <br>ggplot(data = df1, aes(x = theta, y = value, col = variable)) + <br> geom_line() + <br> xlab(\"Ability\") + <br> ylab(\"Cumulative probability\") + <br> xlim(-4, 4) + <br> ylim(0, 1) + <br> theme_bw() + <br> theme(text = element_text(size = 14), <br> panel.grid.major = element_blank(), <br> panel.grid.minor = element_blank()) + <br> ggtitle(\"Cumulative probabilities\") + <br> scale_color_manual(\"\", values = c(\"red\", \"yellow\", \"green\", \"blue\"), labels = paste0(\"P(Y >= \", 1:4, \")\")) <br><br># calculating category probabilities <br>df2 <- data.frame(1, sapply(1:length(b), function(i) ccirt(theta, a, b[i]))) <br>df2 <- data.frame(sapply(1:length(b), function(i) df2[, i] - df2[, i+1]), df2[, ncol(df2)], theta) <br>df2 <- melt(df2, id.vars = \"theta\") <br><br># plotting category probabilities <br>ggplot(data = df2, aes(x = theta, y = value, col = variable)) + <br> geom_line() + <br> xlab(\"Ability\") + <br> ylab(\"Category probability\") + <br> xlim(-4, 4) + <br> ylim(0, 1) + <br> theme_bw() + <br> theme(text = element_text(size = 14), <br> panel.grid.major = element_blank(), <br> panel.grid.minor = element_blank()) + <br> ggtitle(\"Category probabilities\") + <br> scale_color_manual(\"\", values = c(\"black\", \"red\", \"yellow\", \"green\", \"blue\"), labels = paste0(\"P(Y = \", 0:4, \")\"))<br><br># calculating expected item score<br>df3 <- data.frame(1, sapply(1:length(b), function(i) ccirt(theta, a, b[i]))) <br>df3 <- data.frame(sapply(1:length(b), function(i) df3[, i] - df3[, i+1]), df3[, ncol(df3)])<br>df3 <- data.frame(exp = as.matrix(df3) %*% 0:4, theta)<br><br># plotting expected item score <br>ggplot(data = df3, aes(x = theta, y = exp)) + <br> geom_line() + <br> xlab(\"Ability\") + <br> ylab(\"Expected item score\") + <br> xlim(-4, 4) + <br> ylim(0, 4) + <br> theme_bw() + <br> theme(text = element_text(size = 14), <br> panel.grid.major = element_blank(), <br> panel.grid.minor = element_blank()) + <br> ggtitle(\"Expected item score\")"))),
br(),
br()
),
# *** Generalized partial credit model ####
# UI tab for the Generalized Partial Credit Model (GPCM) training demo:
# descriptive text, inputs for item parameters, model equations rendered by
# MathJax, interactive plotly figures, and the sample R code shown to the user.
# Output/input IDs (irt_training_gpcm_*) are wired up in the server logic
# elsewhere in the app — TODO confirm against server.R when renaming.
tabPanel('Generalized partial credit model',
h3("Generalized partial credit model"),
p("In Generalized Partial Credit Model (GPCM; Muraki, 1992), probability of the successful transition
from one category score to the next category score is modelled by 2PL IRT model. The response category
probabilities are then ratios between category-related functions (cumulative sums of exponentials)
and their sum."),
p("Two simpler models can be derived from GPCM by restricting some parameters: Partial Credit Model
(PCM; Masters, 1982) uses 1PL IRT model to describe this probability, thus parameters a = 1.
Even more restricted version, the Rating Scale Model (RSM; Andrich, 1978) assumes exactly the same
K response categories for each item and threshold parameters which can be split into a response-threshold
parameter \\(\\lambda_t\\) and an item-specific location parameter \\(\\delta_i\\). These models are
also sometimes called adjacent logit models, as they set linear form to adjacent logits."),
h4("Parameters"),
p("Select number of responses and their threshold parameters ", strong("d"), "and common
discrimination parameter", strong("a"), ". With a = 1 you get PCM. Numerator of \\(\\pi_0 = P(Y = 0)\\) is
set to 1 and \\(\\pi_0\\) is displayed with black color."),
# Numeric input: highest category score K (2-6). The server uses this value
# to render the matching number of threshold sliders (see uiOutput below).
div(style = "display: inline-block; vertical-align: middle; width: 18%;",
numericInput(inputId = "irt_training_gpcm_numresp",
label = "Highest score",
value = 4,
min = 2,
max = 6)),
br(),
# Slider: common discrimination parameter a; setting a = 1 reduces GPCM to PCM.
div(style = "display: inline-block; vertical-align: middle; width: 18%;",
sliderInput(inputId = "irt_training_gpcm_a",
label = "a - discrimination",
value = 1,
min = 0,
max = 4,
step = 0.01)),
br(),
# Threshold-parameter sliders are generated server-side, since their count
# depends on the "Highest score" numeric input above.
uiOutput("irt_training_gpcm_sliders"),
br(),
h4("Equations"),
# MathJax: GPCM category probability pi_k and the expected item score.
('$$\\pi_k =\\mathrm{P}\\left(Y = k \\vert \\theta, \\alpha, \\delta_0, \\dots, \\delta_K\\right) = \\frac{\\exp\\sum_{t = 0}^k \\alpha(\\theta - \\delta_t)}{\\sum_{r = 0}^K\\exp\\sum_{t = 0}^r \\alpha(\\theta - \\delta_t)} $$'),
('$$\\mathrm{E}\\left(Y \\vert \\theta, \\alpha, \\delta_0, \\dots, \\delta_K\\right) = \\sum_{k = 0}^K k\\pi_k$$'),
h4("Plots"),
# Side-by-side interactive plots: category probabilities and expected score,
# each with its own download button in the matching layout slot below.
splitLayout(cellWidths = c("50%", "50%"),
plotlyOutput('irt_training_gpcm_plot'),
plotlyOutput('irt_training_gpcm_plot_expected')),
splitLayout(cellWidths = c("50%", "50%"),
downloadButton('DB_irt_training_gpcm_plot', label = "Download figure"),
downloadButton('DB_irt_training_gpcm_plot_expected', label = "Download figure")),
h4("Selected R code"),
# Static, display-only R snippet (HTML-escaped) reproducing the plots above;
# it is shown to the user, never evaluated by the app.
div(code(HTML("library(ggplot2) <br>library(data.table) <br><br># setting parameters <br>a <- 1 <br>d <- c(-1.5, -1, -0.5, 0) <br>theta <- seq(-4, 4, 0.01) <br><br># calculating category probabilities <br>ccgpcm <- function(theta, a, d){ a*(theta - d) } <br>df <- sapply(1:length(d), function(i) ccgpcm(theta, a, d[i])) <br>pk <- sapply(1:ncol(df), function(k) apply(as.data.frame(df[, 1:k]), 1, sum)) <br>pk <- cbind(0, pk) <br>pk <- exp(pk) <br>denom <- apply(pk, 1, sum) <br>df <- apply(pk, 2, function(x) x/denom)<br>df1 <- melt(data.frame(df, theta), id.vars = \"theta\") <br><br># plotting category probabilities <br>ggplot(data = df1, aes(x = theta, y = value, col = variable)) + <br> geom_line() + <br> xlab(\"Ability\") + <br> ylab(\"Category probability\") + <br> xlim(-4, 4) + <br> ylim(0, 1) + <br> theme_bw() + <br> theme(text = element_text(size = 14), <br> panel.grid.major = element_blank(), <br> panel.grid.minor = element_blank()) + <br> ggtitle(\"Category probabilities\") + <br> scale_color_manual(\"\", values = c(\"black\", \"red\", \"yellow\", \"green\", \"blue\"), labels = paste0(\"P(Y = \", 0:4, \")\"))<br><br># calculating expected item score<br>df2 <- data.frame(exp = as.matrix(df) %*% 0:4, theta)<br># plotting category probabilities <br>ggplot(data = df2, aes(x = theta, y = exp)) + <br> geom_line() + <br> xlab(\"Ability\") + <br> ylab(\"Expected item score\") + <br> xlim(-4, 4) + <br> ylim(0, 4) + <br> theme_bw() + <br> theme(text = element_text(size = 14), <br> panel.grid.major = element_blank(), <br> panel.grid.minor = element_blank()) + <br> ggtitle(\"Expected item score\")"))),
br(),
br()
),
# *** Nominal response model ####
# UI tab for the Nominal Response Model (NRM) training demo: descriptive text,
# inputs for item parameters, the model equation (MathJax), an interactive
# plotly figure, and the sample R code shown to the user. Input/output IDs
# (irt_training_nrm_*) are wired up in the server logic elsewhere in the app.
# Fixes vs. previous revision: grammar in the displayed description
# ("chosen arbitrary" -> "chosen arbitrarily"; rephrased the garbled
# "NRM model is generalization of GPCM model by setting ..." sentence).
tabPanel('Nominal response model',
h3("Nominal response model"),
p("In Nominal Response Model (NRM; Bock, 1972), probability of selecting given category over baseline
category is modelled by 2PL IRT model. This model is also sometimes called baseline-category logit
model, as it sets linear form to log of odds of selecting given category to selecting a baseline category.
Baseline can be chosen arbitrarily, although usually the correct answer or the first answer is chosen.
The NRM is a generalization of the GPCM, obtained by allowing item-specific and category-specific
intercept and slope parameters."),
h4("Parameters"),
p("Select number of distractors and their threshold parameters ", strong("d"), "and discrimination parameters",
strong("a"), ". Parameters of \\(\\pi_0 = P(Y = 0)\\) are set to zeros and \\(\\pi_0\\) is displayed with black color."),
# Numeric input: number of distractors (2-6). The server uses this value to
# render a matching set of a/d sliders (see uiOutput below).
div(style = "display: inline-block; vertical-align: middle; width: 18%;",
numericInput(inputId = "irt_training_nrm_numresp",
label = "Number of distractors",
value = 4,
min = 2,
max = 6)),
br(),
# Per-category discrimination/threshold sliders are generated server-side,
# since their count depends on the numeric input above.
uiOutput("irt_training_nrm_sliders"),
br(),
h4("Equations"),
# MathJax: NRM category probability pi_k (baseline-category logit form).
('$$\\pi_k =\\mathrm{P}\\left(Y = k \\vert \\theta, \\alpha_0, \\dots, \\alpha_K, \\delta_0, \\dots, \\delta_K\\right) = \\frac{\\exp(\\alpha_k\\theta + \\delta_k)}{\\sum_{r = 0}^K\\exp(\\alpha_r\\theta + \\delta_r)} $$'),
h4("Plots"),
plotlyOutput('irt_training_nrm_plot'),
downloadButton("DB_irt_training_nrm_plot", label = "Download figure"),
h4("Selected R code"),
# Static, display-only R snippet (HTML-escaped) reproducing the plot above;
# it is shown to the user, never evaluated by the app.
div(code(HTML("library(ggplot2) <br>library(data.table) <br><br># setting parameters <br>a <- c(2.5, 2, 1, 1.5) <br>d <- c(-1.5, -1, -0.5, 0) <br>theta <- seq(-4, 4, 0.01) <br><br># calculating category probabilities <br>ccnrm <- function(theta, a, d){ exp(d + a*theta) } <br>df <- sapply(1:length(d), function(i) ccnrm(theta, a[i], d[i])) <br>df <- data.frame(1, df) <br>denom <- apply(df, 1, sum) <br>df <- apply(df, 2, function(x) x/denom) <br>df1 <- melt(data.frame(df, theta), id.vars = \"theta\") <br><br># plotting category probabilities <br>ggplot(data = df1, aes(x = theta, y = value, col = variable)) + <br> geom_line() + <br> xlab(\"Ability\") + <br> ylab(\"Category probability\") + <br> xlim(-4, 4) + <br> ylim(0, 1) + <br> theme_bw() + <br> theme(text = element_text(size = 14), <br> panel.grid.major = element_blank(), <br> panel.grid.minor = element_blank()) + <br> ggtitle(\"Category probabilities\") + <br> scale_color_manual(\"\", values = c(\"black\", \"red\", \"yellow\", \"green\", \"blue\"), labels = paste0(\"P(Y = \", 0:4, \")\"))<br><br># calculating expected item score<br>df2 <- data.frame(exp = as.matrix(df) %*% 0:4, theta)<br><br># plotting expected item score<br>ggplot(data = df2, aes(x = theta, y = exp)) + <br> geom_line() + <br> xlab(\"Ability\") + <br> ylab(\"Expected item score\") + <br> xlim(-4, 4) + <br> ylim(0, 4) + <br> theme_bw() + <br> theme(text = element_text(size = 14), <br> panel.grid.major = element_blank(), <br> panel.grid.minor = element_blank()) + <br> ggtitle(\"Expected item score\")"))),
br(),
br()
)
)))
# Add the following code to your website.
# For more information on customizing the embed code, read Embedding Snippets.