GibbsMultinomialMLB: Multinomial LCM

Description Usage Arguments Value Examples

View source: R/GibbsMultinomialMLB.R

Description

This function implements a version of the collapsed Gibbs sampler from Bradley et al. (2019). The data are assumed to follow a multinomial distribution with a logit link to a mixed-effects model, and the priors and random effects are assumed to follow a multivariate logit-beta distribution.
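A logit-beta random variable is the log-odds of a beta random variable, which is what makes this family conjugate with the logit link. As a minimal univariate sketch (the Beta(alpha, kappa - alpha) parameterization, with kappa > alpha, is an assumption made here for illustration; see Bradley et al. (2019) for the exact multivariate form):

rlogitbeta <- function(n, alpha, kappa) {
  q <- rbeta(n, alpha, kappa - alpha)  # assumes kappa > alpha
  log(q/(1 - q))                       # log-odds of a beta draw
}
hist(rlogitbeta(10000, 2, 5), breaks = 50)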

Usage

GibbsMultinomialMLB(report, B, y, nn, X, Psi, nslice = 2)

Arguments

report

Option to print the iteration number of the MCMC.

B

The number of iterations of the collapsed Gibbs sampler.

y

An n dimensional vector consisting of multinomial count-valued observations.

nn

An n dimensional vector consisting of totals associated with multinomial count-valued observations.

X

An nxp matrix of covariates. Each column represents a covariate. Each row corresponds to one of the n replicates.

Psi

An nxr matrix of basis functions. Each column represents a basis function. Each row corresponds to one of the n replicates.

nslice

The burn-in for the slice sampler. Defaults to 2.

Value

beta A pxB matrix of MCMC values for beta. The vector beta corresponds to the covariates X.

eta A rxB matrix of MCMC values for eta. The vector eta corresponds to the basis functions Psi.

xi An nxB matrix of MCMC values for xi. The vector xi corresponds to uncorrelated random effects.

alphab A 1xB matrix of MCMC values for the shape parameter associated with beta.

alphae A 1xB matrix of MCMC values for the shape parameter associated with eta.

alphax A 1xB matrix of MCMC values for the shape parameter associated with xi.

kappab A 1xB matrix of MCMC values for the second shape parameter associated with beta.

kappae A 1xB matrix of MCMC values for the second shape parameter associated with eta.

kappax A 1xB matrix of MCMC values for the second shape parameter associated with xi.
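
Each component stores its MCMC draws in the columns, so posterior summaries are computed row-wise. For example (a minimal sketch, assuming a fitted object output as produced in the Examples below):

beta.med <- apply(output$beta, 1, median)  # posterior medians of the fixed effects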

Examples

library(CM)
library(coda)  # provides as.mcmc() and effectiveSize() used below
set.seed(23)

#simulate pseudo data
n = 4*50
X = cbind(1,sin(pi*seq(0,2,by = ((2 - 0)/(n - 1)))))
X=X[1:n,]
p = dim(X)[2]
output<-qr(X)
Q = qr.Q(output)

#Moran's I basis functions (Hughes and Haran, 2013)
r = n/2
Psi = diag(n) - Q%*%t(Q)
output2<-eigen(Psi)
Psi = output2$vectors[,1:r]
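# sanity check: the retained eigenvectors span part of the orthogonal
# complement of the column space of X, so Psi is orthogonal to X
stopifnot(max(abs(crossprod(Psi, X))) < 1e-6)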

#large scale parameters
beta = c(0.01,-2)
mu = X%*%beta
plot(mu)

#small scale parameters
eta = sqrt(as.numeric(var(X%*%beta))/2)*rnorm(r)  # as.numeric() drops the 1x1 matrix returned by var()
rand.effects = Psi%*%eta
plot(rand.effects)

#fine scale parameters
xi = sqrt(as.numeric(var(Psi%*%eta))/2)*rnorm(n)  # as.numeric() drops the 1x1 matrix returned by var()

#simulate multinomial data with 5 categories
nu = mu+rand.effects+xi
plot(nu)

# organize pi according to Section 2.1
pi1 = exp(nu[1:(n/4)])/(1+exp(nu[1:(n/4)]))
p2 = exp(nu[(n/4 +1):(2*n/4)])/(1+exp(nu[(n/4 +1):(2*n/4)]))
pi2 = (1-pi1)*p2
p3 = exp(nu[(2*n/4 +1):(3*n/4)])/(1+exp(nu[(2*n/4 +1):(3*n/4)]))
pi3 = (1-pi1 - pi2)*p3
p4 = exp(nu[(3*n/4 +1):(4*n/4)])/(1+exp(nu[(3*n/4 +1):(4*n/4)]))
pi4 = (1-pi1 - pi2 - pi3)*p4
pi5 = (1-pi1 - pi2 - pi3-pi4)
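
# sanity check: the stick-breaking construction guarantees that the five
# category probabilities sum to one at each replicate
stopifnot(max(abs(pi1 + pi2 + pi3 + pi4 + pi5 - 1)) < 1e-12)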

#generate sample sizes
m = rpois(n/4,50)
data = matrix(0,n/4,5)
for (j in 1:(n/4)){
  data[j,]=rmultinom(1, m[j], prob = c(pi1[j],pi2[j],pi3[j],pi4[j],pi5[j]))
}
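
# each simulated row of counts sums to its Poisson-generated total
stopifnot(all(rowSums(data) == m))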

#organize vectors according to Section 2.1 of Bradley et al. (2019)
y = matrix(data[,1:4],n,1)
nn = matrix(cbind(m,m-data[,1],m-rowSums(data[,1:2]),m-rowSums(data[,1:3])),n,1)
indics = nn>0
y = y[indics==1]
nn = nn[indics==1]
XX = X
PsiPsi = Psi
X = X[indics==1,]
Psi = Psi[indics==1,]
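
# quick dimension check after dropping the zero-total rows
stopifnot(length(y) == length(nn), nrow(X) == length(y), nrow(Psi) == length(y))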

#number of MCMC reps
B = 2000

#Implement Pseudo-Code 2 with updates for shape parameters
output<-GibbsMultinomialMLB(100,B,y,nn,X,Psi)

#check some trace plots
plot(as.mcmc(output$beta[1,1000:B]))
plot(as.mcmc(output$beta[2,1000:B]))
plot(as.mcmc(output$alphab[1,1000:B]))
plot(as.mcmc(output$alphae[1,1000:B]))
plot(as.mcmc(output$alphax[1,1000:B]))
plot(as.mcmc(output$kappab[1,1000:B]))
plot(as.mcmc(output$kappae[1,1000:B]))
plot(as.mcmc(output$kappax[1,1000:B]))
plot(as.mcmc(output$eta[5,1000:B]))
plot(as.mcmc(output$eta[15,1000:B]))
plot(as.mcmc(output$eta[20,1000:B]))
plot(as.mcmc(output$xi[10,1000:B]))
plot(as.mcmc(output$xi[20,1000:B]))
plot(as.mcmc(output$xi[30,1000:B]))
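
# optional diagnostic: effective sample sizes for the fixed effects
# (rows of output$beta are parameters, columns are MCMC draws)
effectiveSize(as.mcmc(t(output$beta[,1000:B])))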

#how's our estimate of beta? the true beta is (0.01,-2)
apply(output$beta[,1000:B], 1, quantile,probs=c(0.025,0.5,0.975))

#how's our estimate of the proportions?
#get mcmc reps of nu
nu.gibbs = XX%*%(output$beta[,1000:B])+PsiPsi%*%(output$eta[,1000:B])
nu.gibbs[indics==1,]=output$xi[,1000:B] +XX[indics==1,]%*%(output$beta[,1000:B])+PsiPsi[indics==1,]%*%(output$eta[,1000:B])
nu.gibbs[indics==0,]=XX[indics==0,]%*%(output$beta[,1000:B])+PsiPsi[indics==0,]%*%(output$eta[,1000:B])

#reorganize mcmc reps of nu into pi according to Section 2.1
pi1m = exp(nu.gibbs[1:(n/4),])/(1+exp(nu.gibbs[1:(n/4),]))
p2m = exp(nu.gibbs[(n/4 +1):(2*n/4),])/(1+exp(nu.gibbs[(n/4 +1):(2*n/4),]))
pi2m = (1-pi1m)*p2m
p3m = exp(nu.gibbs[(2*n/4 +1):(3*n/4),])/(1+exp(nu.gibbs[(2*n/4 +1):(3*n/4),]))
pi3m = (1-pi1m - pi2m)*p3m
p4m = exp(nu.gibbs[(3*n/4 +1):(4*n/4),])/(1+exp(nu.gibbs[(3*n/4 +1):(4*n/4),]))
pi4m = (1-pi1m - pi2m - pi3m)*p4m

pit = c(pi1,pi2,pi3,pi4)
pihat = c(rowMeans(pi1m),rowMeans(pi2m),rowMeans(pi3m),rowMeans(pi4m))
pibounds = apply(rbind(pi1m,pi2m,pi3m,pi4m), 1, quantile, probs = c(0.025,0.975))
pilow = matrix(pibounds[1,],n,1)
piup = matrix(pibounds[2,],n,1)
pilow[pilow<0] = 0
piup[piup<0] = 0
pihat[pihat<0] = 0

#figure
plot(pit,pihat,ylab=c("Estimated Proportion"),xlab=c("True Proportion"),main=c("Estimated Versus Truth"))
abline(0,1)
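
# optional numerical summaries: mean absolute error of the point
# estimates and empirical coverage of the pointwise 95% intervals
mean(abs(pit - pihat))
mean(pit >= pilow & pit <= piup)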

#version of figure from manuscript
plot(1:n,pit, type = "l", col = "black",xlab = "Arbitrary Ordering of Proportions",ylab = "Proportions")
lines(1:n,pihat, type = "l", col = "red",lty=1)
legend("topright", c("Truth","Estimated"),col=c("black", "red"), lty=c(1,1))
