Description Usage Arguments Value Examples
View source: R/GibbsPoissonMLG.R
This code implements a version of the collapsed Gibbs sampler from Bradley et al. (2018). The model assumes that the data follow a Poisson distribution with a log link to a mixed effects model. The priors and random effects are assumed to follow a multivariate log-gamma distribution.
GibbsPoissonMLG(Niter = 2000, X, G, data, sigbeta = 10,
  printevery = 100, updatekappa = FALSE, jointupdate = TRUE,
  estRegress = TRUE)
Niter: The number of iterations of the collapsed Gibbs sampler.
X: An n x p matrix of covariates. Each column represents a covariate. Each row corresponds to one of the n replicates.
G: An n x r matrix of basis functions. Each column represents a basis function. Each row corresponds to one of the n replicates.
data: An n-dimensional vector consisting of count-valued observations.
sigbeta: Prior variance on the regression parameters.
printevery: Option to print the iteration number of the MCMC.
updatekappa: Updating kappa is not needed if X contains an intercept. The default is FALSE.
jointupdate: Block update beta and eta together. Default is TRUE.
estRegress: Indicator variable. If TRUE, regression parameters will be based on regressing onto Y = X beta + G eta + delta.
betas A pxNiter matrix of MCMC values for beta. The vector beta corresponds to the covariates X.
etas A rxNiter matrix of MCMC values for eta. The vector eta corresponds to the basis functions G.
deltas A nxNiter matrix of MCMC values for delta. The vector delta corresponds to uncorrelated random effects.
lambda_rep A nxNiter matrix of MCMC values for the mean of the Poisson dataset, modeled as exp(X beta + G eta + delta).
alpha_b A 1xNiter matrix of MCMC values for the shape parameter associated with beta.
alpha_eta A 1xNiter matrix of MCMC values for the shape parameter associated with eta.
alpha_delta A 1xNiter matrix of MCMC values for the shape parameter associated with delta.
#load the necessary packages
library(CM)
set.seed(123)

#A non-linear test intensity function
lambda <- function(t) exp(1.1 + sin(2 * pi * t))

#Define some 1-d locations, dropping the endpoint t = 0
points <- seq(0, 1, length.out = 1001)
points <- points[2:1001]
m <- nrow(as.matrix(points))

#True mean at each location (lambda is vectorized, so no loop is needed)
truemean <- matrix(lambda(points), m, 1)
#Simulate one Poisson count at each location's true mean
#(loop kept so the RNG draw order matches the original script exactly)
data <- matrix(0, m, 1)
for (i in seq_len(m)) {
  data[i] <- rpois(1, truemean[i])
}

#Count how many zeros the simulated data contain
sum(data == 0)

#Plot the simulated counts over time
plot(data, xlab = "time", ylab = "Poisson counts", main = "Counts vs. time")
#Intercept-only covariate matrix
X <- matrix(1, m, 1)
p <- ncol(X)

##Compute the basis function matrix: thin-plate splines on r knots
r <- 8
knots <- seq(0, 1, length.out = r)

#Build G and orthogonalize it via QR
G <- THINSp(as.matrix(points, m, 1), as.matrix(knots, r, 1))
G <- qr.Q(qr(G))

#Orthogonalize X via QR
X <- qr.Q(qr(X))
#Run the collapsed Gibbs sampler
output <- GibbsPoissonMLG(Niter = 2000, X, G, data)

#Trace plots with the burn-in period removed
burn <- 1000:2000
plot(as.mcmc(output$betas[burn]))
plot(as.mcmc(output$etas[1, burn]))
plot(as.mcmc(output$etas[8, burn]))
plot(as.mcmc(output$deltas[10, burn]))
plot(as.mcmc(output$alpha_eta[1, burn]))
#alpha_delta does not mix well; one could thin, however, we choose not to.
#See MacEachern and Berliner (1994) for a discussion on thinning.
plot(as.mcmc(output$alpha_delta[1, burn]))
#Posterior summaries of lambda (burn-in removed)
keep <- 1000:2000
lambda_est <- apply(output$lambda_rep[, keep], 1, mean)
lambda_lower <- apply(output$lambda_rep[, keep], 1, quantile, 0.025)
lambda_upper <- apply(output$lambda_rep[, keep], 1, quantile, 0.975)

#Plot the estimates (red) and 95% bounds (blue) against the truth
plot(1:m, truemean, ylim = c(0, max(lambda_upper) + 1))
lines(1:m, lambda_est, col = "red")
lines(1:m, lambda_lower, col = "blue")
lines(1:m, lambda_upper, col = "blue")

#Smoothed posterior summaries (burn-in removed)
lambda_est <- apply(output$lambda_rep_smooth[, keep], 1, mean)
lambda_lower <- apply(output$lambda_rep_smooth[, keep], 1, quantile, 0.025)
lambda_upper <- apply(output$lambda_rep_smooth[, keep], 1, quantile, 0.975)

#Plot the smoothed estimates and bounds against the truth
plot(1:m, truemean, ylim = c(0, max(lambda_upper) + 1))
lines(1:m, lambda_est, col = "red")
lines(1:m, lambda_lower, col = "blue")
lines(1:m, lambda_upper, col = "blue")
#Posterior correlation matrix of lambda across the m locations.
#Fixes relative to the original:
#  (1) the deviations were taken about lambda_est, which at this point holds
#      the *smoothed* posterior mean (lambda_rep_smooth, reassigned above);
#      recompute the posterior mean of lambda_rep itself.
#  (2) the sum ran over the 1000 columns 1001:2000 but divided by 1001;
#      use the same retained samples as the mean and divide by their count.
#  (3) the 1000 x 1000 dimension was hard-coded; use m.
#  (4) drop the noisy per-iteration print(jj) progress output.
keep <- 1000:2000
nkeep <- length(keep)
lambda_mean <- apply(output$lambda_rep[, keep], 1, mean)
covmat <- matrix(0, m, m)
for (jj in keep) {
  dev <- output$lambda_rep[, jj] - lambda_mean
  covmat <- covmat + dev %*% t(dev) / nkeep
}
#Scale the covariance to a correlation matrix: D^{-1/2} Sigma D^{-1/2},
#where D holds the posterior variances (inv_sd = reciprocal std devs)
inv_sd <- 1 / sqrt(diag(covmat))
corrmat <- diag(inv_sd) %*% covmat %*% diag(inv_sd)
image(corrmat)
------------------
#Example 2
#library(devtools)
#install_github("JonathanBradley28/CM")
library(CM)
set.seed(12)

#Define some 1-d locations, dropping the endpoint t = 0
points <- seq(0, 1, length.out = 201)
points <- points[2:201]
m <- nrow(as.matrix(points))

#Intercept-only covariate matrix
X <- matrix(1, m, 1)
p <- ncol(X)

##Compute the basis function matrix: thin-plate splines on r knots,
##then orthogonalize G via QR
r <- 8
knots <- seq(0, 1, length.out = r)
G <- THINSp(as.matrix(points, m, 1), as.matrix(knots, r, 1))
G <- qr.Q(qr(G))
#True mean at each location: exp(2 * X + G %*% trueeta) with a fixed eta
trueeta <- c(-1, 1, -2, 2, -3, 3, 1, 1)
truemean <- matrix(0, length(points), 1)
for (j in seq_along(points)) {
  truemean[j, 1] <- exp(X[j] * 2 + G[j, ] %*% trueeta)
}

#Simulate one Poisson count at each location's true mean
#(loop kept so the RNG draw order matches the original script exactly)
data <- matrix(0, m, 1)
for (i in seq_len(m)) {
  data[i] <- rpois(1, truemean[i])
}

#Count how many zeros the simulated data contain
sum(data == 0)

#Plot the simulated counts over time
plot(data, xlab = "time", ylab = "Poisson counts", main = "Counts vs. time")
#Orthogonalizing X is optional here (intercept-only covariate)
#outX<-qr(X)
#X<-qr.Q(outX)

#Run the collapsed Gibbs sampler without the joint beta/eta block update
output <- GibbsPoissonMLG(Niter = 2000, X, G, data, sigbeta = 100, jointupdate = FALSE)

#Squared error of the posterior mean of beta (truth is 2), then its trace plot
burn <- 1000:2000
betaerror <- (2 - mean(output$betas[burn]))^2
plot(as.mcmc(output$betas[burn]))

#Mean squared error of the posterior means of eta, then a trace plot per component
etaerror <- mean((trueeta - apply(output$etas[, burn], 1, mean))^2)
for (k in 1:8) {
  plot(as.mcmc(output$etas[k, burn]))
}

#Prediction error for the latent mean, then trace plots for two deltas
prederror <- mean((truemean - apply(output$lambda_rep[, burn], 1, mean))^2)
plot(as.mcmc(output$deltas[1, burn]))
plot(as.mcmc(output$deltas[2, burn]))
#Smoothed posterior summaries of lambda (burn-in removed)
keep <- 1000:2000
lambda_est <- apply(output$lambda_rep_smooth[, keep], 1, mean)
lambda_lower <- apply(output$lambda_rep_smooth[, keep], 1, quantile, 0.025)
lambda_upper <- apply(output$lambda_rep_smooth[, keep], 1, quantile, 0.975)

#Plot the estimates (red) and 95% bounds (blue) against the truth
plot(1:m, truemean, ylim = c(0, max(lambda_upper) + 1))
lines(1:m, lambda_est, col = "red")
lines(1:m, lambda_lower, col = "blue")
lines(1:m, lambda_upper, col = "blue")
|
Add the following code to your website.
For more information on customizing the embed code, read Embedding Snippets.