# R/lav_object_methods.R

# `methods' for fitted lavaan objects
#
# standard (S4) methods:
# - show()
# - summary()
# - coef()
# - fitted.values() + fitted()
# - vcov()
# - logLik()
# - nobs()
# - update()
# - anova()

# lavaan-specific methods:
#
# - parameterEstimates()
# - standardizedSolution()
# - parameterTable()
# - varTable()


setMethod("show", "lavaan",
function(object) {
    # efa?
    efa.flag <- object@Options$model.type == "efa"

    # show only basic information
    res <- lav_object_summary(object, fit.measures = FALSE,
                                      estimates    = FALSE,
                                      modindices   = FALSE,
                                      efa          = efa.flag)
    if(efa.flag) {
        # print (standardized) loadings only
        class(res) <- c("lavaan.efa", "list")
        print(res)
    } else {
        # print lavaan header
        print(res)
    }
    invisible(res)
})

setMethod("summary", "lavaan",
function(object, header       = TRUE,
                 fit.measures = FALSE,
                 estimates    = TRUE,
                 ci           = FALSE,
                 fmi          = FALSE,
                 std          = FALSE,
                 standardized = FALSE,
                 remove.step1 = TRUE,
                 cov.std      = TRUE,
                 rsquare      = FALSE,
                 std.nox      = FALSE,
                 fm.args      = list(standard.test        = "default",
                                     scaled.test          = "default",
                                     rmsea.ci.level       = 0.90,
                                     rmsea.h0.closefit    = 0.05,
                                     rmsea.h0.notclosefit = 0.08,
                                     robust               = TRUE,
                                     cat.check.pd         = TRUE),
                 modindices   = FALSE,
                 nd = 3L, cutoff = 0.3, dot.cutoff = 0.1) {

    # efa?
    efa.flag <- object@Options$model.type == "efa"

    res <- lav_object_summary(object = object, header = header,
               fit.measures = fit.measures, estimates = estimates,
               ci = ci, fmi = fmi, std = std, standardized = standardized,
               remove.step1 = remove.step1, cov.std = cov.std,
               rsquare = rsquare, std.nox = std.nox, efa = efa.flag,
               fm.args = fm.args, modindices = modindices)
    # res has class c("lavaan.summary", "list")

    # what about nd? only used if we actually print; save as attribute
    attr(res, "nd") <- nd

    # if efa, add cutoff and dot.cutoff, and change class
    if(efa.flag) {
        #class(res) <- c("lavaan.summary.efa", "list")
        attr(res, "cutoff") <- cutoff
        attr(res, "dot.cutoff") <- dot.cutoff
    }

    res
})


setMethod("coef", "lavaan",
function(object, type="free", labels=TRUE) {
    lav_object_inspect_coef(object = object, type = type,
                            add.labels = labels, add.class = TRUE)
})

standardizedSolution <-
    standardizedsolution <- function(object,
                                     type = "std.all",
                                     se = TRUE,
                                     zstat = TRUE,
                                     pvalue = TRUE,
                                     ci = TRUE,
                                     level = 0.95,
                                     cov.std = TRUE,
                                     remove.eq = TRUE,
                                     remove.ineq = TRUE,
                                     remove.def = FALSE,
                                     partable = NULL,
                                     GLIST = NULL,
                                     est   = NULL,
                                     output = "data.frame") {

    stopifnot(type %in% c("std.all", "std.lv", "std.nox"))

    # check output= argument
    output <- tolower(output)
    if(output %in% c("data.frame", "table")) {
        output <- "data.frame"
    } else if(output %in% c("text", "pretty")) {
        output <- "text"
    } else {
        stop("lavaan ERROR: output must be ", sQuote("data.frame"),
             " or ", sQuote("text"))
    }

    # no zstat + pvalue if estimator is Bayes
    if(object@Options$estimator == "Bayes") {
        zstat <- pvalue <- FALSE
    }

    # no se if the object is not of (exactly) class lavaan
    # using class() -- we cannot use inherits(), as that would also be TRUE
    # for blavaan objects
    if(class(object)[1L] != "lavaan") {
        if(missing(se) || !se) {
            se <- FALSE
            zstat <- FALSE
            pvalue <- FALSE
        }
    }

    if(is.null(partable)) {
        PARTABLE <- inspect(object, "list")
    } else {
        PARTABLE <- partable
    }
    LIST <- PARTABLE[,c("lhs", "op", "rhs", "exo")]
    if(!is.null(PARTABLE$group)) {
        LIST$group <- PARTABLE$group
    }
    if(!is.null(PARTABLE$block)) {
        LIST$block <- PARTABLE$block
    }
    if(sum(nchar(PARTABLE$label)) != 0L) {
      LIST$label <- PARTABLE$label
    }

    # add the est.std column (std.lv, std.all or std.nox, depending on type)
    if(type == "std.lv") {
        LIST$est.std     <- lav_standardize_lv(object, est = est, GLIST = GLIST,
                                               partable = partable, cov.std = cov.std)
    } else if(type == "std.all") {
        LIST$est.std <- lav_standardize_all(object, est = est, GLIST = GLIST,
                                            partable = partable, cov.std = cov.std)
    } else if(type == "std.nox") {
        LIST$est.std <- lav_standardize_all_nox(object, est = est, GLIST = GLIST,
                                                partable = partable, cov.std = cov.std)
    }

    if(object@Options$se != "none" && se) {
        # add 'se' for standardized parameters
        VCOV <- try(lav_object_inspect_vcov(object, standardized = TRUE,
                                            type = type, free.only = FALSE,
                                            add.labels = FALSE,
                                            add.class = FALSE))
        if(inherits(VCOV, "try-error") || is.null(VCOV)) {
            LIST$se <- rep(NA, length(LIST$lhs))
            if(zstat) {
                LIST$z  <- rep(NA, length(LIST$lhs))
            }
            if(pvalue) {
                 LIST$pvalue <- rep(NA, length(LIST$lhs))
            }
        } else {
            tmp <- diag(VCOV)
            # catch negative values
            min.idx <- which(tmp < 0)
            if(length(min.idx) > 0L) {
                tmp[min.idx] <- as.numeric(NA)
            }
            # now, we can safely take the square root
            tmp <- sqrt(tmp)

            # catch near-zero SEs
            zero.idx <- which(tmp < .Machine$double.eps^(1/4))
            # (the exponent was 1/2 before 0.6, and 1/3 before 0.6-9)
            if(length(zero.idx) > 0L) {
                tmp[zero.idx] <- 0.0
            }
            LIST$se <- tmp

            # add 'z' column
            if(zstat) {
                 tmp.se <- ifelse( LIST$se == 0.0, NA, LIST$se)
                 LIST$z <- LIST$est.std / tmp.se
            }
            if(zstat && pvalue) {
                 LIST$pvalue <- 2 * (1 - pnorm( abs(LIST$z) ))
            }
        }
    }

    # simple symmetric confidence interval
    if(se && object@Options$se != "none" && ci) {
        # next three lines based on confint.lm
        a <- (1 - level)/2; a <- c(a, 1 - a)
        fac <- qnorm(a)
        #if(object@Options$se != "bootstrap") {
            ci <- LIST$est.std + LIST$se %o% fac
        #} else {
        #    ci <- rep(as.numeric(NA), length(LIST$est.std)) + LIST$se %o% fac
        #}

        LIST$ci.lower <- ci[,1]; LIST$ci.upper <- ci[,2]
    }


    # if single group, remove group column
    if(object@Data@ngroups == 1L) LIST$group <- NULL

    # remove == rows?
    if(remove.eq) {
        eq.idx <- which(LIST$op == "==")
        if(length(eq.idx) > 0L) {
            LIST <- LIST[-eq.idx,]
        }
    }
    # remove <> rows?
    if(remove.ineq) {
        ineq.idx <- which(LIST$op %in% c("<",">"))
        if(length(ineq.idx) > 0L) {
            LIST <- LIST[-ineq.idx,]
        }
    }
    # remove := rows?
    if(remove.def) {
        def.idx <- which(LIST$op == ":=")
        if(length(def.idx) > 0L) {
            LIST <- LIST[-def.idx,]
        }
    }

    # always remove 'da' rows (if any)
    if(any(LIST$op == "da")) {
        da.idx <- which(LIST$op == "da")
        LIST <- LIST[-da.idx,,drop = FALSE]
    }



    if(output == "text") {
        class(LIST) <- c("lavaan.parameterEstimates", "lavaan.data.frame",
                         "data.frame")
        # LIST$exo is needed for printing, don't remove it
        attr(LIST, "group.label") <- object@Data@group.label
        attr(LIST, "level.label") <- object@Data@level.label
        #attr(LIST, "header") <- FALSE
    } else {
        LIST$exo <- NULL
        LIST$block <- NULL
        class(LIST) <- c("lavaan.data.frame", "data.frame")
    }

    LIST
}
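
# Illustrative sketch (assumes the fitted object 'fit' from the usage sketch
# near the top of this file): standardizedSolution() returns completely
# standardized estimates (type = "std.all") by default, together with
# delta-method standard errors and symmetric confidence intervals.
if (FALSE) {
    standardizedSolution(fit)                       # std.all, 95% CI
    standardizedSolution(fit, type = "std.lv")      # only latents standardized
    standardizedSolution(fit, level = 0.90, output = "text")
}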

parameterEstimates <-
parameterestimates <- function(object,

                               # select columns
                               se           = TRUE,
                               zstat        = TRUE,
                               pvalue       = TRUE,
                               ci           = TRUE,
                               standardized = FALSE,
                               fmi          = FALSE,

                               # control
                               level        = 0.95,
                               boot.ci.type = "perc",
                               cov.std      = TRUE,
                               fmi.options  = list(),

                               # add rows
                               rsquare = FALSE,

                               # remove rows
                               remove.system.eq      = TRUE,
                               remove.eq             = TRUE,
                               remove.ineq           = TRUE,
                               remove.def            = FALSE,
                               remove.nonfree        = FALSE,
                               remove.step1          = TRUE,
                               #remove.nonfree.scales = FALSE,

                               # output
                               add.attributes = FALSE,
                               output = "data.frame",
                               header = FALSE) {

    if(inherits(object, "lavaan.fsr")) {
        return(object$PE)
    }

    # deprecated add.attributes (for psycho/blavaan)
    if(add.attributes) {
        output <- "text"
    }


    # no se if class is not lavaan
    # can't use inherits(), as this would return TRUE if object is from blavaan
    if(class(object)[1L] != "lavaan") {
        if(missing(se) || !se) {
            se <- FALSE
            zstat <- FALSE
            pvalue <- FALSE
        }
    }

    # check output= argument
    output <- tolower(output)
    if(output %in% c("data.frame", "table")) {
        output <- "data.frame"
        header <- FALSE
    } else if(output %in% c("text", "pretty")) {
        output <- "text"
    } else {
        stop("lavaan ERROR: output must be ", sQuote("data.frame"),
             " or ", sQuote("text"))
    }

    # check fmi
    if(fmi) {
        if(inherits(object, "lavaanList")) {
            warning("lavaan WARNING: fmi not available for object of class \"lavaanList\"")
            fmi <- FALSE
        }
        if(object@Options$se != "standard") {
            warning("lavaan WARNING: fmi only available if se = \"standard\"")
            fmi <- FALSE
        }
        if(object@Options$estimator != "ML") {
            warning("lavaan WARNING: fmi only available if estimator = \"ML\"")
            fmi <- FALSE
        }
        if(!object@SampleStats@missing.flag) {
            warning("lavaan WARNING: fmi only available if missing = \"(fi)ml\"")
            fmi <- FALSE
        }
        if(!object@optim$converged) {
            warning("lavaan WARNING: fmi not available; model did not converge")
            fmi <- FALSE
        }
    }

    # no zstat + pvalue if estimator is Bayes
    if(object@Options$estimator == "Bayes") {
        zstat <- pvalue <- FALSE
    }

    PARTABLE <- as.data.frame(object@ParTable, stringsAsFactors = FALSE)
    LIST <- PARTABLE[,c("lhs", "op", "rhs", "free")]
    if(!is.null(PARTABLE$user)) {
        LIST$user <- PARTABLE$user
    }
    if(!is.null(PARTABLE$block)) {
        LIST$block <- PARTABLE$block
    } else {
        LIST$block <- rep(1L, length(LIST$lhs))
    }
    if(!is.null(PARTABLE$level)) {
        LIST$level <- PARTABLE$level
    } else {
        LIST$level <- rep(1L, length(LIST$lhs))
    }
    if(!is.null(PARTABLE$group)) {
        LIST$group <- PARTABLE$group
    } else {
        LIST$group <- rep(1L, length(LIST$lhs))
    }
    if(!is.null(PARTABLE$step)) {
        LIST$step <- PARTABLE$step
    }
    if(!is.null(PARTABLE$efa)) {
        LIST$efa <- PARTABLE$efa
    }
    if(!is.null(PARTABLE$label)) {
        LIST$label <- PARTABLE$label
    } else {
        LIST$label <- rep("", length(LIST$lhs))
    }
    if(!is.null(PARTABLE$exo)) {
        LIST$exo <- PARTABLE$exo
    } else {
        LIST$exo <- rep(0L, length(LIST$lhs))
    }
    if(inherits(object, "lavaanList")) {
        # by default: nothing!
        #if("partable" %in% object@meta$store.slots) {
        #    COF <- sapply(object@ParTableList, "[[", "est")
        #    LIST$est <- rowMeans(COF)
        #}
        LIST$est <- NULL
    } else if(!is.null(PARTABLE$est)) {
        LIST$est <- PARTABLE$est
    } else {
        LIST$est <- lav_model_get_parameters(object@Model, type = "user",
                                             extra = TRUE)
    }
    if(!is.null(PARTABLE$lower)) {
        LIST$lower <- PARTABLE$lower
    }
    if(!is.null(PARTABLE$upper)) {
        LIST$upper <- PARTABLE$upper
    }


    # add se, zstat, pvalue
    if(se && object@Options$se != "none") {
        LIST$se <- lav_object_inspect_se(object)
        # handle tiny SEs
        LIST$se <- ifelse(LIST$se < sqrt(.Machine$double.eps), 0, LIST$se)
        tmp.se <- ifelse(LIST$se < sqrt(.Machine$double.eps), NA, LIST$se)
        if(zstat) {
            LIST$z <- LIST$est / tmp.se
            if(pvalue) {
                LIST$pvalue <- 2 * (1 - pnorm( abs(LIST$z) ))
                # remove p-value if bounds have been used
                if(!is.null(PARTABLE$lower)) {
                   b.idx <- which(abs(PARTABLE$lower - PARTABLE$est) <
                                      sqrt(.Machine$double.eps) &
                                  PARTABLE$free > 0L)
                   if(length(b.idx) > 0L) {
                       LIST$pvalue[b.idx] <- as.numeric(NA)
                   }
                }
                if(!is.null(PARTABLE$upper)) {
                   b.idx <- which(abs(PARTABLE$upper - PARTABLE$est) <
                                      sqrt(.Machine$double.eps) &
                                  PARTABLE$free > 0L)
                   if(length(b.idx) > 0L) {
                       LIST$pvalue[b.idx] <- as.numeric(NA)
                   }
                }
            }
        }
    }

    # extract bootstrap data (if any)
    if(object@Options$se == "bootstrap" ||
       "bootstrap" %in%  object@Options$test ||
       "bollen.stine" %in% object@Options$test) {
        BOOT <- lav_object_inspect_boot(object)
        bootstrap.seed <- attr(BOOT, "seed") # for bca
        error.idx <- attr(BOOT, "error.idx")
        if(length(error.idx) > 0L) {
            BOOT <- BOOT[-error.idx,,drop = FALSE] # drops attributes
        }
    } else {
        BOOT <- NULL
    }

    bootstrap.successful <- NROW(BOOT) # should be zero if NULL

    # confidence interval
    if(se && object@Options$se != "none" && ci) {
        # next three lines based on confint.lm
        a <- (1 - level)/2; a <- c(a, 1 - a)
        if(object@Options$se != "bootstrap") {
            fac <- qnorm(a)
            ci <- LIST$est + LIST$se %o% fac
        } else if(object@Options$se == "bootstrap") {

            # local copy of 'norm.inter' from boot package (not exported!)
            norm.inter <- function(t, alpha)  {
                t <- t[is.finite(t)]; R <- length(t); rk <- (R + 1) * alpha
                if (!all(rk > 1 & rk < R))
                     warning("extreme order statistics used as endpoints")
                k <- trunc(rk); inds <- seq_along(k)
                out <- inds; kvs <- k[k > 0 & k < R]
                tstar <- sort(t, partial = sort(union(c(1, R), c(kvs, kvs+1))))
                ints <- (k == rk)
                if (any(ints)) out[inds[ints]] <- tstar[k[inds[ints]]]
                out[k == 0] <- tstar[1L]
                out[k == R] <- tstar[R]
                not <- function(v) xor(rep(TRUE,length(v)),v)
                temp <- inds[not(ints) & k != 0 & k != R]
                temp1 <- qnorm(alpha[temp])
                temp2 <- qnorm(k[temp]/(R+1))
                temp3 <- qnorm((k[temp]+1)/(R+1))
                tk <- tstar[k[temp]]
                tk1 <- tstar[k[temp]+1L]
                out[temp] <- tk + (temp1-temp2)/(temp3-temp2)*(tk1 - tk)
                cbind(round(rk, 2), out)
            }

            stopifnot(!is.null(BOOT))
            stopifnot(boot.ci.type %in% c("norm","basic","perc",
                                          "bca.simple", "bca"))
            if(boot.ci.type == "norm") {
                fac <- qnorm(a)
                boot.x <- colMeans(BOOT, na.rm = TRUE)
                boot.est <-
                    lav_model_get_parameters(object@Model,
                                       GLIST=lav_model_x2GLIST(object@Model, boot.x),
                                       type="user", extra=TRUE)
                bias.est <- (boot.est - LIST$est)
                ci <- (LIST$est - bias.est) + LIST$se %o% fac
            } else if(boot.ci.type == "basic") {
                ci <- cbind(LIST$est, LIST$est)
                alpha <- (1 + c(level, -level))/2

                # free.idx only
                qq <- apply(BOOT, 2, norm.inter, alpha)
                free.idx <- which(object@ParTable$free &
                                  !duplicated(object@ParTable$free))
                ci[free.idx,] <- 2*ci[free.idx,] - t(qq[c(3,4),])

                # def.idx
                def.idx <- which(object@ParTable$op == ":=")
                if(length(def.idx) > 0L) {
                    BOOT.def <- apply(BOOT, 1, object@Model@def.function)
                    if(length(def.idx) == 1L) {
                        BOOT.def <- as.matrix(BOOT.def)
                    } else {
                        BOOT.def <- t(BOOT.def)
                    }
                    qq <- apply(BOOT.def, 2, norm.inter, alpha)
                    ci[def.idx,] <- 2*ci[def.idx,] - t(qq[c(3,4),])
                }

                # TODO: add cin/ceq?

            } else if(boot.ci.type == "perc") {
                ci <- cbind(LIST$est, LIST$est)
                alpha <- (1 + c(-level, level))/2

                # free.idx only
                qq <- apply(BOOT, 2, norm.inter, alpha)
                free.idx <- which(object@ParTable$free &
                                  !duplicated(object@ParTable$free))
                ci[free.idx,] <- t(qq[c(3,4),])

                # def.idx
                def.idx <- which(object@ParTable$op == ":=")
                if(length(def.idx) > 0L) {
                    BOOT.def <- apply(BOOT, 1, object@Model@def.function)
                    if(length(def.idx) == 1L) {
                        BOOT.def <- as.matrix(BOOT.def)
                    } else {
                        BOOT.def <- t(BOOT.def)
                    }
                    qq <- apply(BOOT.def, 2, norm.inter, alpha)
                    def.idx <- which(object@ParTable$op == ":=")
                    ci[def.idx,] <- t(qq[c(3,4),])
                }

                # TODO:  add cin/ceq?

            } else if(boot.ci.type == "bca.simple") {
               # no adjustment for scale!! only bias!!
               alpha <- (1 + c(-level, level))/2
               zalpha <- qnorm(alpha)
               ci <- cbind(LIST$est, LIST$est)

               # free.idx only
               free.idx <- which(object@ParTable$free &
                                 !duplicated(object@ParTable$free))
               x <- LIST$est[free.idx]
               for(i in 1:length(free.idx)) {
                   t <- BOOT[,i]; t <- t[is.finite(t)]; t0 <- x[i]
                   # check if we have variance (perhaps constrained to 0?)
                   # new in 0.6-3
                   if(var(t) == 0) {
                       next
                   }
                   w <- qnorm(sum(t < t0)/length(t))
                   a <- 0.0 #### !!! ####
                   adj.alpha <- pnorm(w + (w + zalpha)/(1 - a*(w + zalpha)))
                   qq <- norm.inter(t, adj.alpha)
                   ci[free.idx[i],] <- qq[,2]
               }

               # def.idx
               def.idx <- which(object@ParTable$op == ":=")
               if(length(def.idx) > 0L) {
                   x.def <- object@Model@def.function(x)
                   BOOT.def <- apply(BOOT, 1, object@Model@def.function)
                   if(length(def.idx) == 1L) {
                       BOOT.def <- as.matrix(BOOT.def)
                   } else {
                       BOOT.def <- t(BOOT.def)
                   }
                   for(i in 1:length(def.idx)) {
                       t <- BOOT.def[,i]; t <- t[is.finite(t)]; t0 <- x.def[i]
                       w <- qnorm(sum(t < t0)/length(t))
                       a <- 0.0 #### !!! ####
                       adj.alpha <- pnorm(w + (w + zalpha)/(1 - a*(w + zalpha)))
                       qq <- norm.inter(t, adj.alpha)
                       ci[def.idx[i],] <- qq[,2]
                   }
               }

               # TODO:
               # - add cin/ceq
            } else if(boot.ci.type == "bca") { # new in 0.6-12
               # we assume that the 'ordinary' (nonparametric) bootstrap was used

               lavoptions <- object@Options
               ngroups <- object@Data@ngroups
               nobs <- object@SampleStats@nobs
               ntotal <- object@SampleStats@ntotal

               # we need enough bootstrap runs
               if(nrow(BOOT) < ntotal) {
                   txt <- paste("BCa confidence intervals require more ",
                                "(successful) bootstrap runs (", nrow(BOOT),
                                ") than the number of observations (",
                                ntotal, ").", sep = "")
                   stop(lav_txt2message(txt, header = "lavaan ERROR:"))
               }

               # does not work with sampling weights (yet)
               if(!is.null(object@Data@weights[[1]])) {
                   stop("lavaan ERROR: BCa confidence intervals not available in the presence of sampling weights.")
               }

               # check if we have a seed
               if(is.null(bootstrap.seed)) {
                   stop("lavaan ERROR: seed not available in BOOT object.")
               }

               # compute 'X' matrix with frequency indices (to compute
               # the empirical influence values using regression)
               FREQ <- lav_utils_bootstrap_indices(R = lavoptions$bootstrap,
                   nobs = nobs, parallel = lavoptions$parallel[1],
                   ncpus = lavoptions$ncpus, cl = lavoptions[["cl"]],
                   iseed = bootstrap.seed, return.freq = TRUE,
                   merge.groups = TRUE)
               if(length(error.idx) > 0L) {
                   FREQ <- FREQ[-error.idx, , drop = FALSE]
               }
               stopifnot(nrow(FREQ) == nrow(BOOT))

               # compute empirical influence values (using regression)
               # remove first column per group
               first.idx <- sapply(object@Data@case.idx, "[[", 1L)
               LM <- lm.fit(x = cbind(1, FREQ[,-first.idx]), y = BOOT)
               BETA <- unname(LM$coefficients)[-1,,drop = FALSE]
               LL <- rbind(0, BETA)

               # compute 'a' for all parameters at once
               AA <- apply(LL, 2L, function(x) {
                           L <- x - mean(x); sum(L^3)/(6*sum(L^2)^1.5) })

               # adjustment for both bias AND scale
               alpha <- (1 + c(-level, level))/2
               zalpha <- qnorm(alpha)
               ci <- cbind(LIST$est, LIST$est)

               # free.idx only
               free.idx <- which(object@ParTable$free &
                                 !duplicated(object@ParTable$free))
               stopifnot(length(free.idx) == ncol(BOOT))
               x <- LIST$est[free.idx]
               for(i in 1:length(free.idx)) {
                   t <- BOOT[,i]; t <- t[is.finite(t)]; t0 <- x[i]
                   # check if we have variance (perhaps constrained to 0?)
                   # new in 0.6-3
                   if(var(t) == 0) {
                       next
                   }
                   w <- qnorm(sum(t < t0)/length(t))
                   a <- AA[i]
                   adj.alpha <- pnorm(w + (w + zalpha)/(1 - a*(w + zalpha)))
                   qq <- norm.inter(t, adj.alpha)
                   ci[free.idx[i],] <- qq[,2]
               }

               # def.idx
               def.idx <- which(object@ParTable$op == ":=")
               if(length(def.idx) > 0L) {
                   x.def <- object@Model@def.function(x)
                   BOOT.def <- apply(BOOT, 1, object@Model@def.function)
                   if(length(def.idx) == 1L) {
                       BOOT.def <- as.matrix(BOOT.def)
                   } else {
                       BOOT.def <- t(BOOT.def)
                   }

                   # recompute empirical influence values
                   LM <- lm.fit(x = cbind(1, FREQ[,-1]), y = BOOT.def)
                   BETA <- unname(LM$coefficients)[-1,,drop = FALSE]
                   LL <- rbind(0, BETA)

                   # compute 'a' values for all def.idx parameters
                   AA <- apply(LL, 2L, function(x) {
                       L <- x - mean(x); sum(L^3)/(6*sum(L^2)^1.5) })

                   # compute bca ci
                   for(i in 1:length(def.idx)) {
                       t <- BOOT.def[,i]; t <- t[is.finite(t)]; t0 <- x.def[i]
                       w <- qnorm(sum(t < t0)/length(t))
                       a <- AA[i]
                       adj.alpha <- pnorm(w + (w + zalpha)/(1 - a*(w + zalpha)))
                       qq <- norm.inter(t, adj.alpha)
                       ci[def.idx[i],] <- qq[,2]
                   }
               }

               # TODO:
               # - add cin/ceq
            }
        }

        LIST$ci.lower <- ci[,1]; LIST$ci.upper <- ci[,2]
    }

    # standardized estimates?
    if(standardized) {
        LIST$std.lv  <- lav_standardize_lv(object, cov.std = cov.std)
        LIST$std.all <- lav_standardize_all(object, est.std=LIST$est.std,
                                            cov.std = cov.std)
        LIST$std.nox <- lav_standardize_all_nox(object, est.std=LIST$est.std,
                                                cov.std = cov.std)
    }

    # rsquare?
    if(rsquare) {
        r2 <- lavTech(object, "rsquare", add.labels = TRUE)
        NAMES <- unlist(lapply(r2, names)); nel <- length(NAMES)
        if(nel == 0L) {
            warning("lavaan WARNING: rsquare = TRUE, but there are no dependent variables")
        } else {
            if(lav_partable_nlevels(LIST) == 1L) {
                block <- rep(1:length(r2), sapply(r2, length))
                first.block.idx <- which(!duplicated(LIST$block) &
                                         LIST$block > 0L)
                GVAL <- LIST$group[first.block.idx]
                if(length(GVAL) > 0L) {
                    group <- rep(GVAL, sapply(r2, length))
                } else {
                    # single block, single group
                    group <- rep(1L, length(block))
                }
                R2 <- data.frame( lhs = NAMES, op = rep("r2", nel), rhs = NAMES,
                                  block = block, group = group,
                                  est = unlist(r2), stringsAsFactors = FALSE )
            } else {
                # add level column
                block <- rep(1:length(r2), sapply(r2, length))
                first.block.idx <- which(!duplicated(LIST$block) &
                                         LIST$block > 0L)
                # always at least two blocks
                GVAL <- LIST$group[first.block.idx]
                group <- rep(GVAL, sapply(r2, length))
                LVAL <- LIST$level[first.block.idx]
                level <- rep(LVAL, sapply(r2, length))
                R2 <- data.frame( lhs = NAMES, op = rep("r2", nel), rhs = NAMES,
                              block = block, group = group,
                              level = level,
                              est = unlist(r2), stringsAsFactors = FALSE )
            }
            LIST <- lav_partable_merge(pt1 = LIST, pt2 = R2, warn = FALSE)
        }
    }

    # fractional missing information (only available if missing = "(fi)ml")
    if(fmi) {
        SE.orig <- LIST$se

        # new in 0.6-6: use 'EM'-based (unstructured) sample statistics;
        # otherwise, it would be as if we used expected information, while
        # the original fit used observed information, producing nonsensical
        # results
        if(object@Data@ngroups > 1L) {
            EM.cov  <- lapply(lavInspect(object, "sampstat.h1"), "[[", "cov")
            EM.mean <- lapply(lavInspect(object, "sampstat.h1"), "[[", "mean")
        } else {
            EM.cov  <- lavInspect(object, "sampstat.h1")$cov
            EM.mean <- lavInspect(object, "sampstat.h1")$mean
        }

        PT <- parTable(object)
        PT$ustart <- PT$est
        PT$start <- PT$est <- NULL

        this.options <- object@Options
        if(!is.null(fmi.options) && is.list(fmi.options)) {
            # modify original options
            this.options <- modifyList(this.options, fmi.options)
        }
        # override
        this.options$optim.method <- "none"
        this.options$sample.cov.rescale <- FALSE
        this.options$check.gradient <- FALSE
        this.options$baseline <- FALSE
        this.options$h1 <- FALSE
        this.options$test <- FALSE

        fit.complete <- lavaan(model = PT,
                               sample.cov   = EM.cov,
                               sample.mean  = EM.mean,
                               sample.nobs  = lavInspect(object, "nobs"),
                               slotOptions  = this.options)

        SE.comp <- parameterEstimates(fit.complete, ci = FALSE, fmi = FALSE,
            zstat = FALSE, pvalue = FALSE, remove.system.eq = FALSE,
            remove.eq = FALSE, remove.ineq = FALSE,
            remove.def = FALSE, remove.nonfree = FALSE,
            rsquare = rsquare, add.attributes = FALSE)$se

        SE.comp <- ifelse(SE.comp == 0.0, as.numeric(NA), SE.comp)
        LIST$fmi <- 1 - (SE.comp * SE.comp) / (SE.orig * SE.orig)
    }

    # if single level, remove level column
    if(object@Data@nlevels == 1L) LIST$level <- NULL

    # if single group, remove group column
    if(object@Data@ngroups == 1L) LIST$group <- NULL

    # if single everything, remove block column
    if(object@Data@nlevels == 1L &&
       object@Data@ngroups == 1L) {
        LIST$block <- NULL
    }

    # if no user-defined labels, remove label column
    if(sum(nchar(object@ParTable$label)) == 0L) {
        LIST$label <- NULL
    }

    # remove non-free parameters? (but keep ==, >, < and :=)
    if(remove.nonfree) {
        nonfree.idx <- which( LIST$free == 0L &
                              !LIST$op %in% c("==", ">", "<", ":=") )
        if(length(nonfree.idx) > 0L) {
            LIST <- LIST[-nonfree.idx,]
        }
    }

    # remove non-free scales (categorical only), except 'user-specified'
    #
    # (not yet public)
    #
    #if(remove.nonfree.scales) {
    #    nonfree.scales.idx <- which( LIST$free == 0L & LIST$op == "~*~" &
    #                                 LIST$user == 0L)
    #    if(length(nonfree.scales.idx) > 0L) {
    #        LIST <- LIST[-nonfree.scales.idx,]
    #    }
    #}

    # remove 'free' column
    LIST$free <- NULL

    # remove == rows?
    if(remove.eq) {
        eq.idx <- which(LIST$op == "==" & LIST$user == 1L)
        if(length(eq.idx) > 0L) {
            LIST <- LIST[-eq.idx,]
        }
    }
    if(remove.system.eq) {
        eq.idx <- which(LIST$op == "==" & LIST$user != 1L)
        if(length(eq.idx) > 0L) {
            LIST <- LIST[-eq.idx,]
        }
    }
    # remove <> rows?
    if(remove.ineq) {
        ineq.idx <- which(LIST$op %in% c("<", ">"))
        if(length(ineq.idx) > 0L) {
            LIST <- LIST[-ineq.idx,]
        }
    }
    # remove := rows?
    if(remove.def) {
        def.idx <- which(LIST$op == ":=")
        if(length(def.idx) > 0L) {
            LIST <- LIST[-def.idx,]
        }
    }

    # remove step 1 rows?
    if(remove.step1 && !is.null(LIST$step)) {
        step1.idx <- which(LIST$step == 1L)
        if(length(step1.idx) > 0L) {
            LIST <- LIST[-step1.idx,]
        }
        # remove step column
        LIST$step <- NULL
    }

    # always remove 'da' entries (if any)
    if(any(LIST$op == "da")) {
        da.idx <- which(LIST$op == "da")
        LIST <- LIST[-da.idx,,drop = FALSE]
    }



    # remove LIST$user
    LIST$user <- NULL

    if(output == "text") {
        class(LIST) <- c("lavaan.parameterEstimates", "lavaan.data.frame",
                         "data.frame")
        if(header) {
            attr(LIST, "information") <- object@Options$information[1]
            attr(LIST, "information.meat") <- object@Options$information.meat
            attr(LIST, "se") <- object@Options$se
            attr(LIST, "group.label") <- object@Data@group.label
            attr(LIST, "level.label") <- object@Data@level.label
            attr(LIST, "bootstrap") <- object@Options$bootstrap
            attr(LIST, "bootstrap.successful") <- bootstrap.successful
            attr(LIST, "missing") <- object@Options$missing
            attr(LIST, "observed.information") <-
                object@Options$observed.information[1]
            attr(LIST, "h1.information") <- object@Options$h1.information[1]
            attr(LIST, "h1.information.meat") <- object@Options$h1.information.meat
            attr(LIST, "header") <- header
            # FIXME: add more!!
        }
    } else {
        LIST$exo <- NULL
        LIST$lower <- LIST$upper <- NULL
        class(LIST) <- c("lavaan.data.frame", "data.frame")
    }

    LIST
}
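
# Illustrative sketch: bootstrap confidence intervals in parameterEstimates()
# require a model fitted with se = "bootstrap"; the number of draws
# (bootstrap = 1000) is chosen here for illustration only. 'fit' and
# 'HS.model' refer to the usage sketch near the top of this file.
if (FALSE) {
    fit.boot <- cfa(HS.model, data = HolzingerSwineford1939,
                    se = "bootstrap", bootstrap = 1000)
    parameterEstimates(fit.boot, boot.ci.type = "perc")        # percentile CI
    parameterEstimates(fit.boot, boot.ci.type = "bca.simple")  # bias-corrected only
    parameterEstimates(fit, standardized = TRUE, rsquare = TRUE)
}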

parameterTable <- parametertable <- parTable <- partable <-
        function(object) {

    # convert to data.frame
    out <- as.data.frame(object@ParTable, stringsAsFactors = FALSE)

    class(out) <- c("lavaan.data.frame", "data.frame")
    out
}
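
# Illustrative sketch: parTable() returns the full internal parameter table
# (including the 'free' and 'est' columns) as a data.frame.
if (FALSE) {
    head(parTable(fit))
}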

varTable <- vartable <- function(object, ov.names=names(object),
                                 ov.names.x=NULL,
                                 ordered = NULL, factor = NULL,
                                 as.data.frame.=TRUE) {

    if(inherits(object, "lavaan")) {
        VAR <- object@Data@ov
    } else if(inherits(object, "lavData")) {
        VAR <- object@ov
    } else if(inherits(object, "data.frame")) {
        VAR <- lav_dataframe_vartable(frame = object, ov.names = ov.names,
                                      ov.names.x = ov.names.x,
                                      ordered = ordered, factor = factor,
                                      as.data.frame. = FALSE)
    } else {
        stop("object must of class lavaan or a data.frame")
    }

    if(as.data.frame.) {
        VAR <- as.data.frame(VAR, stringsAsFactors=FALSE,
                             row.names=1:length(VAR$name))
        class(VAR) <- c("lavaan.data.frame", "data.frame")
    }

    VAR
}
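
# Illustrative sketch: varTable() also accepts a plain data.frame, in which
# case the variable table is built directly via lav_dataframe_vartable().
if (FALSE) {
    varTable(fit)                        # from a fitted lavaan object
    varTable(HolzingerSwineford1939)     # from a raw data.frame
}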


setMethod("fitted.values", "lavaan",
function(object, type = "moments", labels=TRUE) {

    # lowercase type
    type <- tolower(type)

    # catch type="casewise"
    if(type %in% c("casewise","case","obs","observations","ov")) {
        return( lavPredict(object, type = "ov", label = labels) )
    }

    lav_object_inspect_implied(object,
               add.labels = labels, add.class = TRUE,
               drop.list.single.group = TRUE)
})


setMethod("fitted", "lavaan",
function(object, type = "moments", labels=TRUE) {
     fitted.values(object, type = type, labels = labels)
})
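
# Illustrative sketch: fitted() and fitted.values() return the model-implied
# moments (the covariance matrix, plus the mean vector if the model has a
# meanstructure), per block/group when there are several.
if (FALSE) {
    fitted(fit)$cov      # model-implied covariance matrix
}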


setMethod("vcov", "lavaan",
function(object, type = "free", labels = TRUE, remove.duplicated = FALSE) {

    # check for convergence first!
    if(object@optim$npar > 0L && !object@optim$converged)
        stop("lavaan ERROR: model did not converge")

    if(object@Options$se == "none") {
        stop("lavaan ERROR: vcov not available if se=\"none\"")
    }

    if(type == "user" || type == "joint" || type == "all" || type == "full" ||
       type == "complete") {
        if(remove.duplicated) {
            stop("lavaan ERROR: argument \"remove.duplicated\" not supported if type = \"user\"")
        }
        VarCov <- lav_object_inspect_vcov_def(object, joint = TRUE,
                                          add.labels = labels,
                                          add.class = TRUE)
    } else if(type == "free") {
        VarCov <- lav_object_inspect_vcov(object,
                                          add.labels = labels,
                                          add.class = TRUE,
                                          remove.duplicated = remove.duplicated)
    } else {
        stop("lavaan ERROR: type argument should be \"user\" or \"free\"")
    }

    VarCov
})
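
# Illustrative sketch: vcov() returns the estimated variance-covariance matrix
# of the free parameters; type = "user" also covers defined (:=) parameters.
if (FALSE) {
    sqrt(diag(vcov(fit)))        # reproduces the standard errors
    vcov(fit, type = "user")
}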


# logLik (so that we can use the default AIC/BIC functions from stats4)
setMethod("logLik", "lavaan",
function(object, ...) {
    if(object@Options$estimator != "ML") {
        warning("lavaan WARNING: logLik only available if estimator is ML")
    }
    if(object@optim$npar > 0L && !object@optim$converged) {
        warning("lavaan WARNING: model did not converge")
    }

    # new in 0.6-1: we use the @loglik slot (instead of fitMeasures)
    if(.hasSlot(object, "loglik")) {
        LOGL <- object@loglik
    } else {
        LOGL <- lav_model_loglik(lavdata        = object@Data,
                                 lavsamplestats = object@SampleStats,
                                 lavimplied     = object@implied,
                                 lavmodel       = object@Model,
                                 lavoptions     = object@Options)
    }

    logl <- LOGL$loglik
    attr(logl, "df") <- LOGL$npar    ### note: must be npar, not df!!
    attr(logl, "nobs") <- LOGL$ntotal
    class(logl) <- "logLik"
    logl
})
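
# Illustrative sketch: because logLik() returns a proper "logLik" object
# (with df set to the number of free parameters), the default AIC() and
# BIC() methods work out of the box for ML-based fits.
if (FALSE) {
    logLik(fit)
    AIC(fit)
    BIC(fit)
}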

# nobs
if(!exists("nobs", envir=asNamespace("stats4"))) {
    setGeneric("nobs", function(object, ...) standardGeneric("nobs"))
}
setMethod("nobs", signature(object = "lavaan"),
function(object, ...) {
    object@SampleStats@ntotal
})

# see: src/library/stats/R/update.R
setMethod("update", signature(object = "lavaan"),
function(object, model, add, ..., evaluate = TRUE) {

    call <- object@call
    if (is.null(call))
        stop("need an object with call slot")

    extras <- match.call(expand.dots = FALSE)$...

    if (!missing(model)) {
      #call$formula <- update.formula(formula(object), formula.)
      call$model <- model
    } else if (exists(as.character(call$model))) {
      call$model <- eval(call$model, parent.frame())
    } else if (is.character(call$model)) {
      ## do nothing
      ## call$model <- call$model
    } else {
      call$model <- parTable(object)
      call$model$est <- NULL
      call$model$se <- NULL
    }
    if (!is.null(call$slotParTable) && is.list(call$model)) call$slotParTable <- call$model

    if (length(extras) > 0) {
      ## check for call$slotOptions conflicts
      if (!is.null(call$slotOptions)) {
        sameNames <- intersect(names(lavOptions()), names(extras))
        for (i in sameNames) {
          call$slotOptions[[i]] <- extras[[i]]
          extras[i] <- NULL # not needed if they are in slotOptions
        }
      }
      existing <- !is.na(match(names(extras), names(call)))
        for (a in names(extras)[existing]) call[[a]] <- extras[[a]]
        if (any(!existing)) {
            call <- c(as.list(call), extras[!existing])
            call <- as.call(call)
        }
    }

    if (missing(add) && !evaluate) return(call)
    ## for any of the other 3 scenarios, we need the updated fit

    ## Check if "add" and "model" are both strings; combine them
    if (missing(add)) {
      ADD.already.in.parTable <- TRUE # because nothing to add
    } else {
      if (is.character(add) && is.character(call$model)) {
        call$model <- c(call$model, add)
        ADD.already.in.parTable <- TRUE
      } else ADD.already.in.parTable <- FALSE
    }
    newfit <- eval(call, parent.frame())
    if (ADD.already.in.parTable && evaluate) return(newfit)

    ## only remaining situations: "add" exists, but either "add" or "model"
    ## is a parameter table, so update the parameter table in the call
    if (!(mode(add) %in% c("list","character"))) {
        stop("'add' argument must be model syntax or parameter table. ",
             "See ?lavaanify help page.")
    }
    PT <- lav_object_extended(newfit, add = add)@ParTable
    PT$user <- NULL # get rid of "10" category used in lavTestScore()
    ## group == 0L in new rows
    PT$group[PT$group == 0L] <- PT$block[PT$group == 0L]
    # PT$plabel == "" in new rows.  Consequences?
    PT$est <- NULL
    PT$se <- NULL
    call$model <- PT

    if (evaluate) {
        eval(call, parent.frame())
    }
    else call
})
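
# Illustrative sketch: update() re-evaluates the original call after changing
# the model syntax or any fitting option passed through '...'.
if (FALSE) {
    update(fit, estimator = "MLR")     # re-fit with a robust estimator
    update(fit, group = "school")      # re-fit as a multiple-group model
}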




setMethod("anova", signature(object = "lavaan"),
function(object, ...) {
    # NOTE: if we add additional arguments, it is not the same generic
    # anova() function anymore, and match.call will be screwed up

    # NOTE: we need to extract the names of the models from match.call here,
    #       otherwise, we lose them in the call stack

    mcall <- match.call(expand.dots = TRUE)
    dots <- list(...)

    # catch SB.classic and SB.H0
    #SB.classic <- TRUE; SB.H0 <- FALSE

    #arg.names <- names(dots)
    #arg.idx <- which(nchar(arg.names) > 0L)
    #if(length(arg.idx) > 0L) {
    #    if(!is.null(dots$SB.classic))
    #        SB.classic <- dots$SB.classic
    #    if(!is.null(dots$SB.H0))
    #        SB.H0 <- dots$SB.H0
    #    dots <- dots[-arg.idx]
    #}

    modp <- if(length(dots))
        sapply(dots, inherits, "lavaan") else logical(0)
    mods <- c(list(object), dots[modp])
    NAMES <- sapply(as.list(mcall)[c(FALSE, TRUE, modp)], deparse)

    # use do.call to handle changed dots
    #ans <- do.call("lavTestLRT", c(list(object = object,
    #               SB.classic = SB.classic, SB.H0 = SB.H0,
    #               model.names = NAMES), dots))

    #ans
    lavTestLRT(object = object, ..., model.names = NAMES)
})
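
# Illustrative sketch: anova() on two (or more) nested lavaan fits simply
# delegates to lavTestLRT(), preserving the model names from the call.
# 'fit' and 'HS.model' refer to the usage sketch near the top of this file.
if (FALSE) {
    fit0 <- cfa(HS.model, data = HolzingerSwineford1939,
                orthogonal = TRUE)     # more restrictive: uncorrelated factors
    anova(fit0, fit)                   # same result as lavTestLRT(fit0, fit)
}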
