# R/MTS.R In MTS: All-Purpose Toolkit for Analyzing Multivariate Time Series (MTS) and Estimating Multivariate Volatility Models

### An MTS package by Ruey S. Tsay
##library(mvtnorm)
###
"MTSplot" <- function(data,caltime=NULL){
## plot the multivariate time series
### caltime: calendar time
if(!is.matrix(data))data=as.matrix(data)
if(is.ts(data)){
plot(data)
}
else{
nT=dim(data)[1]
tdx=c(1:nT)
if(length(caltime) > 1)tdx=caltime
k=dim(data)[2]
if(k < 4){
par(mfcol=c(k,1))
for (j in 1:k){
plot(tdx,data[,j],xlab='time',ylab=colnames(data)[j],type='l')
}
}
if(k == 4){
par(mfcol=c(2,2))
for (j in 1:k){
plot(tdx,data[,j],xlab='time',ylab=colnames(data)[j],type='l')
}
}
if((k > 4) && (k < 13)){
par(mfcol=c(3,2),mai=c(0.3,0.3,0.3,0.3))
k1=6
jcnt=0
for (j in 1:k){
plot(tdx,data[,j],xlab='time',ylab=colnames(data)[j],type='l',cex.axis=0.8)
jcnt=jcnt+1
if((jcnt == k1) && (k > 6)){
jcnt=0
cat("Hit return for more plots: ","\n")
}
}
}
if(k > 12){
par(mfcol=c(1,1))
yl=range(data)*1.05
plot(tdx,data[,1],xlab='time',ylab=' ',type='l',ylim=yl)
for (j in 2:k){
lines(tdx,data[,j],lty=j,col=j)
}
}
#end of the program
}
par(mfcol=c(1,1))
}

####
"VAR" <- function(x,p=1,output=T,include.mean=T,fixed=NULL){
# Fits a vector AR(p) model, computes AIC, and residuals
# fixed[i,j] = 1 denotes the parameter needs estimation, = 0, means fixed to 0.
if(!is.matrix(x))x=as.matrix(x)
Tn=dim(x)[1]
k=dim(x)[2]
if(p < 1)p=1
idm=k*p
ne=Tn-p
ist=p+1
y=x[ist:Tn,]
if(include.mean){
idm=idm+1
xmtx=cbind(rep(1,ne),x[p:(Tn-1),])
}
else {
xmtx=x[p:(Tn-1),]
}
if(p > 1){
for (i in 2:p){
xmtx=cbind(xmtx,x[(ist-i):(Tn-i),])
}
}
#
ndim=ncol(xmtx)
if(length(fixed)==0){
paridx=matrix(1,ndim,k)
}
else {
paridx=fixed
}
#perform estimation component-by-component
res=NULL
beta=matrix(0,ndim,k)
sdbeta=matrix(0,ndim,k)
npar=0
for (i in 1:k){
idx=c(1:ndim)[paridx[,i]==1]
resi=y[,i]
if(length(idx)> 0){
xm=as.matrix(xmtx[,idx])
npar=npar+dim(xm)[2]
xpx=t(xm)%*%xm
xpxinv=solve(xpx)
xpy=t(xm)%*%as.matrix(y[,i],ne,1)
betai=xpxinv%*%xpy
beta[idx,i]=betai
resi=y[,i]-xm%*%betai
nee=dim(xm)[2]
sse=sum(resi*resi)/(Tn-p-nee)
dd=diag(xpxinv)
sdbeta[idx,i]=sqrt(dd*sse)
}
res=cbind(res,resi)
}
sse=t(res)%*%res/(Tn-p)
###cat("npar =",npar,"\n")
#
aic=0
bic=0
hq=0
Phi=NULL
Ph0=NULL
#
jst=0
if(include.mean) {
Ph0=beta[1,]
se=sdbeta[1,]
if(output){
cat("Constant term:","\n")
cat("Estimates: ",Ph0,"\n")
cat("Std.Error: ",se,"\n")
}
jst=1
}
### adjustment npar for computing information criterion
if(include.mean){
for (i in 1:k){
if(abs(Ph0[i]) > 0.00000001)npar=npar-1
}
}
if(output)cat("AR coefficient matrix","\n")
### Phi is a storage for AR coefficient matrices
for (i in 1:p){
phi=t(beta[(jst+1):(jst+k),])
se=t(sdbeta[(jst+1):(jst+k),])
if(output){
cat("AR(",i,")-matrix","\n")
print(phi,digits=3)
cat("standard error","\n")
print(se,digits=3)
}
jst=jst+k
Phi=cbind(Phi,phi)
}
if(output){
cat(" ","\n")
cat("Residuals cov-mtx:","\n")
print(sse)
cat(" ","\n")
}
dd=det(sse)
d1=log(dd)
aic=d1+(2*npar)/Tn
bic=d1+log(Tn)*npar/Tn
hq=d1+2*log(log(Tn))*npar/Tn
if(output){
cat("det(SSE) = ",dd,"\n")
cat("AIC = ",aic,"\n")
cat("BIC = ",bic,"\n")
cat("HQ  = ",hq,"\n")
# end of if(output)
}

VAR<-list(data=x,cnst=include.mean,order=p,coef=beta,aic=aic,bic=bic,hq=hq,residuals=res,secoef=sdbeta,Sigma=sse,Phi=Phi,Ph0=Ph0,fixed=fixed)
}

#####
"refVAR" <- function(model,fixed=NULL,thres=1.0){
# This program automatically refines the "model" by removing estimates
# with abs(t-ration) < thres.
#
# model is an VAR output object.
#
x=as.matrix(model\$data)
nT=dim(x)[1]
k=dim(x)[2]
p=model\$order
if(p < 1)p=1
cnst=model\$cnst
fix=fixed
if(length(fixed)== 0){
coef=as.matrix(model\$coef)
secoef=as.matrix(model\$secoef)
nr=dim(coef)[1]
nc=dim(coef)[2]
for (i in 1:nr){
for (j in 1:nc){
if(secoef[i,j] < 10^(-8))secoef[i,j]=1
}
}
fix=matrix(1,nr,k)
# use Backward elimination to simplify the model: equation by equation
### First: setup the regressor matrix
xmtx=NULL
ist=p+1
y=x[ist:nT,]
ne=nT-p
if(cnst)xmtx=matrix(1,ne,1)
for (j in 1:p){
xmtx=cbind(xmtx,x[(ist-j):(nT-j),])
}
xmtx=as.matrix(xmtx)
### perform first elimination based on the previous estimation
for(j in 1:k){
tt=abs(coef[,j]/secoef[,j])
idx=c(1:nr)[tt == min(tt)]
idx1=idx[1]
if(tt[idx1] < thres)fix[idx,j]=0
}
### Perform further elimination
for (j in 1:k){
npar=sum(fix[,j])
while(npar > 0){
jdx=c(1:nr)[fix[,j]==1]
xp=as.matrix(xmtx[,jdx])
nxp=dim(xp)[2]
m1=lm(y[,j]~-1+xp)
m2=summary(m1)
est=m1\$coefficients
se1=sqrt(diag(m2\$cov.unscaled))*m2\$sigma
tt=abs(est/se1)
idx=c(1:nxp)[tt == min(tt)]
idx1=idx[1]
if(tt[idx1] < thres){
fix[jdx[idx],j]=0
npar=sum(fix[,j])
}
else {
npar=0
}
### end of while-statement
}
## end of the j-loop
}
## end of the if(length(fixed)==0) statement
}

mm=VAR(x,p,output=T,include.mean=cnst,fixed=fix)

refVAR <- list(data=mm\$data,order=p,cnst=cnst,coef=mm\$coef,aic=mm\$aic,bic=mm\$bic,hq=mm\$hq,residuals=mm\$residuals,secoef=mm\$secoef,Sigma=mm\$Sigma,Phi=mm\$Phi,Ph0=mm\$Ph0,fixed=fix)
}

######
"VARs" <- function(x,lags,include.mean=T,output=T,fixed=NULL){
# Fits a vector AR model with selected lags, computes its AIC, BIC, and residuals
# Created on March 30, 2009 by Ruey S. Tsay
#
if(!is.matrix(x))x=as.matrix(x)
nT=dim(x)[1]; k=dim(x)[2]
nlags=length(lags)
#
if(nlags < 0){
lags=c(1)
nlags=1
}
#
lags=sort(lags)
idm=k*nlags+1
p=lags[nlags]
if(p < 1)p=1
ne=nT-p
ist=p+1
y=x[ist:nT,]
jj=lags[1]
if(include.mean){
xmtx=cbind(rep(1,ne),x[(ist-jj):(nT-jj),])
}
else {
xmtx=x[(ist-jj):(nT-jj),]
}
if(nlags > 1){
for (i in 2:nlags){
jj=lags[i]
xmtx=cbind(xmtx,x[(ist-jj):(nT-jj),])
}
}
xmtx=as.matrix(xmtx)
ndim=dim(xmtx)[2]
#
if(length(fixed)==0){
paridx=matrix(1,ndim,k)
}
else {
paridx=fixed
}
#perform estimation component-by-component
res=NULL
beta=matrix(0,ndim,k)
sdbeta=matrix(0,ndim,k)
for (i in 1:k){
idx=c(1:ndim)[paridx[,i]==1]
resi=y[,i]
if(length(idx) > 0){
xm=as.matrix(xmtx[,idx])
xpx = t(xm)%*%xm
xpxinv=solve(xpx)
xpy=t(xm)%*%as.matrix(y[,i],ne,1)
betai=xpxinv%*%xpy
beta[idx,i]=betai
resi=y[,i]-xm%*%betai
sse=sum(resi*resi)/nT
dd=diag(xpxinv)
sdbeta[idx,i]=sqrt(sse*dd)
}
res=cbind(res,resi)
}
sse=t(res)%*%res/nT
Phi=NULL
Ph0=rep(0,k)
if(output){
jst=0
if(include.mean){
Ph0=beta[1,]
se=sdbeta[1,]
cat("Constant term:","\n")
print(Ph0,digits=4)
cat("Std error:","\n")
print(se,digits=4)
jst=1
}
cat("AR coefficient matrix:","\n")
### Phi is a storage for the AR coefficient matrices
Phi=matrix(0,k,p*k)
for (i in 1:nlags){
ord=lags[i]
cat("AR(",ord,")-matrix:","\n")
phi=t(beta[(jst+1):(jst+k),])
se=t(sdbeta[(jst+1):(jst+k),])
print(phi,digits=3)
cat("Standard error:","\n")
print(se,digits=3)
jst=jst+k
cat("      ","\n")
kdx=(ord-1)*k
Phi[,(kdx+1):(kdx+k)]=phi
}
cat("Residuals cov-mtx:","\n")
print(sse)

cat("       ","\n")
dd=det(sse)
cat("det(SSE) = ",dd,"\n")
d1=log(dd)
aic=d1+(2*nlags*k*k)/nT
bic=d1+log(nT)*nlags*k*k/nT
cat("AIC = ",aic,"\n")
cat("BIC = ",bic,"\n")
# end of if(output)
}

VARs<-list(data=x,lags=lags,order=p,cnst=include.mean,coef=beta,aic=aic,bic=bic,residuals=res,secoef=sdbeta,Sigma=sse,Phi=Phi,Ph0=Ph0,fixed=fixed)

}

#####
"refVARs" <- function(model,fixed=NULL,thres=1.0){
#This program either (1) automatically refines a fitted VARs model by setting
# parameters with abs(t-ratio) < thres to zero, or uses manually specified "fixed" to
# simplied the VARs model.
#
# model: an VARs output object.
#
x=as.matrix(model\$data)
nT=dim(x)[1]; k=dim(x)[2]; p=model\$order
lags=sort(model\$lags)
nlags = length(lags)
cnst=model\$cnst
fix=fixed
if(length(fixed)==0){
p=lags[nlags]
coef=as.matrix(model\$coef)
secoef=as.matrix(model\$secoef)
nr=dim(coef)[1]
fix=matrix(1,nr,k)
nc=dim(coef)[2]
for (i in 1:nr){
for (j in 1:nc){
if(secoef[i,j] < 10^(-8))secoef[i,j]=1.0
}
}
# use Backward elimination to simplify the model: equation by equation
### First: setup the regressor matrix
xmtx=NULL
ist=p+1
y=x[ist:nT,]
ne=nT-p
if(cnst)xmtx=cbind(xmtx,rep(1,ne))
for (j in 1:nlags){
xmtx=cbind(xmtx,x[(ist-lags[j]):(nT-lags[j]),])
}
xmtx=as.matrix(xmtx)
### perform first elimination based on the previous estimation
for(j in 1:k){
tt=abs(coef[,j]/secoef[,j])
idx=c(1:nr)[tt == min(tt)]
if(tt[idx] < thres)fix[idx,j]=0
}
### Perform further elimination
for (j in 1:k){
npar=sum(fix[,j])
while(npar > 0){
jdx=c(1:nr)[fix[,j]==1]
xp=as.matrix(xmtx[,jdx])
nxp=dim(xp)[2]
m1=lm(y[,j]~-1+xp)
m2=summary(m1)
est=m1\$coefficients
se1=sqrt(diag(m2\$cov.unscaled))*m2\$sigma
tt=abs(est/se1)
idx=c(1:nxp)[tt == min(tt)]
if(tt[idx] < thres){
fix[jdx[idx],j]=0
npar=sum(fix[,j])
}
else {
npar=0
}
### end of while-statement
}
## end of the j-loop
}
## end of the if(length(fixed)==0) statement
}

mm=VARs(x,lags,include.mean=cnst,output=T,fixed=fix)

refVARs <- list(data=x,lags=lags,cnst=cnst,coef=mm\$coef,aic=mm\$aic,bic=mm\$bic,residuals=mm\$residuals,secoef=mm\$secoef,Sigma=mm\$Sigma,Phi=mm\$Phi,Ph0=mm\$Ph0,fixed=fix)
}

#####
"ccm" <- function(x,lags=12,level=FALSE,output=T){
# Compute and plot the cross-correlation matrices.
# lags: number of lags used.
# level: logical unit for printing.
#
if(!is.matrix(x))x=as.matrix(x)
nT=dim(x)[1]; k=dim(x)[2]
if(lags < 1)lags=1
# remove the sample means
y=scale(x,center=TRUE,scale=FALSE)
V1=cov(y)
if(output){
print("Covariance matrix:")
print(V1,digits=3)
}
se=sqrt(diag(V1))
SD=diag(1/se)
S0=SD%*%V1%*%SD
## S0 used later
ksq=k*k
wk=matrix(0,ksq,(lags+1))
wk[,1]=c(S0)
j=0
if(output){
cat("CCM at lag: ",j,"\n")
print(S0,digits=3)
cat("Simplified matrix:","\n")
}
y=y%*%SD
crit=2.0/sqrt(nT)
for (j in 1:lags){
y1=y[1:(nT-j),]
y2=y[(j+1):nT,]
Sj=t(y2)%*%y1/nT
Smtx=matrix(".",k,k)
for (ii in 1:k){
for (jj in 1:k){
if(Sj[ii,jj] > crit)Smtx[ii,jj]="+"
if(Sj[ii,jj] < -crit)Smtx[ii,jj]="-"
}
}
#
if(output){
cat("CCM at lag: ",j,"\n")
for (ii in 1:k){
cat(Smtx[ii,],"\n")
}
if(level){
cat("Correlations:","\n")
print(Sj,digits=3)
}
## end of if-(output) statement
}
wk[,(j+1)]=c(Sj)
}
##
if(output){
par(mfcol=c(k,k))
#### Set k0 = 4 for plotting purpose
k0=4
if(k > k0)par(mfcol=c(k0,k0))
tdx=c(0,1:lags)
jcnt=0
if(k > 10){
print("Skip the plots due to high dimension!")
}
else{
for (j in 1:ksq){
plot(tdx,wk[j,],type='h',xlab='lag',ylab='ccf',ylim=c(-1,1))
abline(h=c(0))
crit=2/sqrt(nT)
abline(h=c(crit),lty=2)
abline(h=c(-crit),lty=2)
jcnt=jcnt+1
if((jcnt==k0^2) && (k > k0)){
jcnt=0
cat("Hit Enter for more plots:","\n")
}
}
}
par(mfcol=c(1,1))
cat("Hit Enter for p-value plot of individual ccm: ","\n")
## end of if-(output) statement
}
## The following p-value plot was added on May 16, 2012 by Ruey Tsay.
### Obtain a p-value plot of ccm matrix
r0i=solve(S0)
R0=kronecker(r0i,r0i)
pv=rep(0,lags)
for (i in 1:lags){
tmp=matrix(wk[,(i+1)],ksq,1)
tmp1=R0%*%tmp
ci=crossprod(tmp,tmp1)*nT*nT/(nT-i)
pv[i]=1-pchisq(ci,ksq)
}
if(output){
plot(pv,xlab='lag',ylab='p-value',ylim=c(0,1))
abline(h=c(0))
abline(h=c(0.05),col="blue")
title(main="Significance plot of CCM")
}
ccm <- list(ccm=wk,pvalue=pv)
}

"VARorder" <- function(x,maxp=13,output=T){
# Compute the AIC, BIC, HQ values and M-stat
##### Use the same number of data points in model comparison
x1=as.matrix(x)
nT=nrow(x1)
k=ncol(x1)
ksq=k*k
if(maxp < 1)maxp=1
enob=nT-maxp
y=x1[(maxp+1):nT, , drop=FALSE]
ist=maxp+1
xmtx=cbind(rep(1,enob),x1[maxp:(nT-1),])
if(maxp > 1){
for (i in 2:maxp){
xmtx=cbind(xmtx,x1[(ist-i):(nT-i),])
}
}
#### in the above, y is the dependent variable, xmtx is the x-matrix
chidet=rep(0,(maxp+1))
s=cov(y)*(enob-1)/enob
chidet[1]=log(det(s))
aic=rep(0,(maxp+1))
aic[1]=chidet[1]
bic=aic
hq=aic
y=as.matrix(y)
#
for (p in 1:maxp){
idm=k*p+1
xm=xmtx[,1:idm]
xm=as.matrix(xm)
xpx <- crossprod(xm,xm)
xpy <- crossprod(xm,y)
beta <- solve(xpx,xpy)
yhat <- xm%*%beta
resi <- y-yhat
sse <- crossprod(resi,resi)/enob
#print(paste("For p = ",p,"residual variance is", sse))
d1=log(det(sse))
aic[p+1]=d1+(2*p*ksq)/nT
bic[p+1]=d1+(log(nT)*p*ksq)/nT
hq[p+1]=d1+(2*log(log(nT))*p*ksq)/nT
chidet[p+1]=d1
}
maic=min(aic)
aicor=c(1:(maxp+1))[aic==maic]-1
mbic=min(bic)
bicor=c(1:(maxp+1))[bic==mbic]-1
mhq=min(hq)
hqor=c(1:(maxp+1))[hq==mhq]-1

Mstat=rep(0,maxp)
pv=rep(0,maxp)
for (j in 1:maxp){
Mstat[j]=(nT-maxp-k*j-1.5)*(chidet[j]-chidet[j+1])
pv[j]=1-pchisq(Mstat[j],ksq)
}
if(output){
cat("selected order: aic = ",aicor,"\n")
cat("selected order: bic = ",bicor,"\n")
cat("selected order: hq = ",hqor,"\n")
#cat("M statistic and its p-value","\n") ## comment out as
#tmp=cbind(Mstat,pv)                     ## results shown in summary
##print(tmp,digits=4)
#print(round(tmp,4))
}
if(output){
n1=length(aic)-1
### print summary table
cri=cbind(c(0:n1),aic,bic,hq,c(0,Mstat),c(0,pv))
colnames(cri) <- c("p","AIC","BIC","HQ","M(p)","p-value")
cat("Summary table: ","\n")
##print(cri,digits=5)
print(round(cri,4))
}

VARorder <- list(aic=aic,aicor=aicor,bic=bic,bicor=bicor,hq=hq,hqor=hqor,Mstat=Mstat,Mpv=pv)
}
################

"VARorderI" <- function(x,maxp=13,output=T){
# Compute the AIC, BIC, HQ values and M-stat
##### This is a modified version of the old program in "VARorder",
##### which uses the same number of data points.
#####  This version was adopted on September 8, 2012 in Singapore.
#####
x1=as.matrix(x)
nT=nrow(x1)
k=ncol(x1)
ksq=k*k
if(maxp < 1)maxp=1
### initialization
chidet=rep(0,(maxp+1))
s=cov(x1)*(nT-1)/nT
chidet[1]=log(det(s))
aic=chidet; bic=aic; hq=aic
#
for (p in 1:maxp){
idm=k*p+1
ist=p+1
enob=nT-p
y=as.matrix(x1[ist:nT, , drop=FALSE])
xmtx=rep(1,enob)
for (j in 1:p){
xmtx=cbind(xmtx,x1[(ist-j):(nT-j),])
}
xm=as.matrix(xmtx)
xpx <- crossprod(xm,xm)
xpy <- crossprod(xm,y)
beta <- solve(xpx,xpy)
yhat <- xm%*%beta
resi <- y-yhat
sse <- crossprod(resi,resi)/enob
#print(paste("For p = ",p,"residual variance is", sse))
d1=log(det(sse))
aic[p+1]=d1+(2*p*ksq)/enob
bic[p+1]=d1+(log(enob)*p*ksq)/enob
hq[p+1]=d1+(2*log(log(enob))*p*ksq)/enob
chidet[p+1]=d1
}
maic=min(aic)
aicor=c(1:(maxp+1))[aic==maic]-1
mbic=min(bic)
bicor=c(1:(maxp+1))[bic==mbic]-1
mhq=min(hq)
hqor=c(1:(maxp+1))[hq==mhq]-1

Mstat=rep(0,maxp)
pv=rep(0,maxp)
for (j in 1:maxp){
Mstat[j]=(nT-maxp-k*j-1.5)*(chidet[j]-chidet[j+1])
pv[j]=1-pchisq(Mstat[j],ksq)
}

if(output){
cat("selected order: aic = ",aicor,"\n")
cat("selected order: bic = ",bicor,"\n")
cat("selected order: hq = ",hqor,"\n")
cat("M statistic and its p-value","\n")
tmp=cbind(Mstat,pv)
print(tmp,digits=4)
##
n1=length(aic)-1
### print summary table
cri=cbind(c(0:n1),aic,bic,hq,c(0,Mstat),c(0,pv))
colnames(cri) <- c("p","AIC","BIC","HQ","M(p)","p-value")
cat("Summary table: ","\n")
print(cri,digits=5)
}

VARorderI <- list(aic=aic,aicor=aicor,bic=bic,bicor=bicor,hq=hq,hqor=hqor,Mstat=Mstat,Mpv=pv)
}
##############################

"VARpsi" <- function(Phi,lag=5){
# Computes the psi-weight matrices of a VAR(p) model.
# Phi=[phi1,phi2,phi3, ....] coefficient matrix
# Created by Ruey S. Tsay, April 2009 & modified on April 2011.
#
# Compute MA representions
k=nrow(Phi)
m=ncol(Phi)
p=floor(m/k)
Si=diag(rep(1,k))
if(p < 1) p =1
if(lag < 1) lag=1
#
for (i in 1:lag){
if (i <= p){
idx=(i-1)*k
tmp=Phi[,(idx+1):(idx+k)]
}
else{
tmp=matrix(0,k,k)
}
#
jj=i-1
jp=min(jj,p)
if(jp > 0){
for(j in 1:jp){
jdx=(j-1)*k
idx=(i-j)*k
w1=Phi[,(jdx+1):(jdx+k)]
w2=Si[,(idx+1):(idx+k)]
tmp=tmp+w1%*%w2
##print(tmp,digits=4)
}
}
Si=cbind(Si,tmp)
}

VARpsi <- list(psi=Si)
}

"VARpred" <- function(model,h=1,orig=0,Out.level=FALSE,output=TRUE){
# Computes the i=1, 2, ..., h-step ahead predictions of a VAR(p) model.
# Phi=[phi1,phi2,phi3, ....] coefficient matrix
# cnst= constant term
# Created by Ruey S. Tsay in April 2011.
#
# Modified on April 20, 2011
# It needs the program VARpsi.R to obtain the psi-weights
# First compute the psi-weights of the VAR(p) model.
#
# Modifed on March 29, 2012 to include MSE of using
#  estimated parameters.
#
# model is a VAR output object.
# Out.level : control the details of output.
# output: Control printing forecast results
x=model\$data
Phi=model\$Phi
sig=model\$Sigma
Ph0=model\$Ph0
p=model\$order
cnst=model\$cnst
np=dim(Phi)[2]
k=dim(x)[2]
#
nT=dim(x)[1]
k=dim(x)[2]
if(orig <= 0)orig=nT
if(orig > nT)orig=nT
psi=VARpsi(Phi,h)\$psi
beta=t(Phi)
if(length(Ph0) < 1)Ph0=rep(0,k)
if(p > orig){
cat("Too few data points to produce forecasts","\n")
}
pred=NULL
se=NULL
MSE=NULL
mse=NULL
px=as.matrix(x[1:orig,])
Past=px[orig,]
if(p > 1){
for (j in 1:(p-1)){
Past=c(Past,px[(orig-j),])
}
}
#
# Setup to compute MSE (due to estimated parameters.
# Compute G-matrix and construct P-matrix
cat("orig ",orig,"\n")
ne=orig-p
xmtx=NULL
P=NULL
if(cnst)xmtx=rep(1,ne)
xmtx=cbind(xmtx,x[p:(orig-1),])
ist=p+1
if(p > 1){
for (j in 2:p){
xmtx=cbind(xmtx,x[(ist-j):(orig-j),])
}
}
xmtx=as.matrix(xmtx)
G=t(xmtx)%*%xmtx/ne
Ginv=solve(G)
##cat("G-matrix: ","\n")
##print(G)
##cat("Ginv","\n")
##print(Ginv)
#
P = Phi
vv=Ph0
if(p > 1){
II=diag(rep(1,k*(p-1)))
II=cbind(II,matrix(0,(p-1)*k,k))
P=rbind(P,II)
vv=c(vv,rep(0,(p-1)*k))
}
if(cnst){
c1=c(1,rep(0,np))
P=cbind(vv,P)
P=rbind(c1,P)
}
##
##cat("P-matrix","\n")
##print(P)
#
Sig=sig
n1=dim(P)[2]
MSE= (n1/orig)*sig
for (j in 1:h){
tmp=Ph0+matrix(Past,1,np)%*%beta
px=rbind(px,tmp)
if(np > k){
Past=c(tmp,Past[1:(np-k)])
}else{
Past=tmp
}
#
#### Compute variance of forecast errors for j > 1.
if(j > 1){
idx=(j-1)*k
wk=psi[,(idx+1):(idx+k)]
Sig=Sig+wk%*%sig%*%t(wk)
}
##### Compute MSE of forecast errors for j > 1.
if(j > 1){
for (ii in 0:(j-1)){
psii=diag(rep(1,k))
if(ii > 0){
idx=ii*k
psii=psi[,(idx+1):(idx+k)]
}
P1=P^(j-1-ii)%*%Ginv
for (jj in 0:(j-1)){
psij=diag(rep(1,k))
if(jj > 0){
jdx=jj*k
psij=psi[,(jdx+1):(jdx+k)]
}
P2=P^(j-1-jj)%*%G
k1=sum(diag(P1%*%P2))
MSE=(k1/orig)*psii%*%sig%*%t(psij)
}
}
#
}
#
se=rbind(se,sqrt(diag(Sig)))
if(Out.level){
cat("Covariance matrix of forecast errors at horizon: ",j,"\n")
print(Sig)
cat("Omega matrix at horizon: ",j,"\n")
print(MSE)
}
#
MSE=MSE+Sig
mse=rbind(mse,sqrt(diag(MSE)))
}
if(output){
cat("Forecasts at origin: ",orig,"\n")
print(px[(orig+1):(orig+h),],digits=4)
cat("Standard Errors of predictions: ","\n")
print(se[1:h,],digits=4)
pred=px[(orig+1):(orig+h),]
cat("Root mean square errors of predictions: ","\n")
print(mse[1:h,],digits=4)
}
if(orig < nT){
cat("Observations, predicted values,     errors, and MSE","\n")
tmp=NULL
jend=min(nT,(orig+h))
for (t in (orig+1):jend){
case=c(t,x[t,],px[t,],x[t,]-px[t,])
tmp=rbind(tmp,case)
}
colnames(tmp) <- c("time",rep("obs",k),rep("fcst",k),rep("err",k))
idx=c(1)
for (j in 1:k){
idx=c(idx,c(0,1,2)*k+j+1)
}
tmp = tmp[,idx]
#print(tmp,digits=3)
print(round(tmp,4))
}

VARpred <- list(pred=pred,se.err=se,rmse=mse)
}

"VARfore" <- function(model,h=1,orig=0){
# Computes the i=1, 2, ..., h-step ahead predictions of a VAR(p) model.
# Phi=[phi1,phi2,phi3, ....] coefficient matrix
# cnst= constant term
# model is a VAR output object.
#
x=model\$data
Phi=model\$Phi
sig=model\$Sigma
Ph0=model\$Ph0
p=model\$order
np=dim(Phi)[2]
#
nT=dim(x)[1]
k=dim(x)[2]
if(orig <= 0)orig=nT
if(orig > nT)orig=nT
psi=VARpsi(Phi,h)\$psi
beta=t(Phi)
if(length(Ph0) < 1)Ph0=rep(0,k)
if(p > orig){
cat("Too few data points to produce forecasts","\n")
}
pred=NULL
se=NULL
px=as.matrix(x[1:orig,])
Past=px[orig,]
if(p > 1){
for (j in 1:(p-1)){
Past=c(Past,px[(orig-j),])
}
}
#
for (j in 1:h){
tmp=Ph0+matrix(Past,1,np)%*%beta
px=rbind(px,tmp)
if(np > k){
Past=c(tmp,Past[1:(np-k)])
}else{
Past=tmp
}
Sig=sig
if (j > 1){
for (ii in 1:(j-1)){
idx=ii*k
wk=psi[,(idx+1):(idx+k)]
Sig=Sig+wk%*%sig%*%t(wk)
}
}
se=rbind(se,sqrt(diag(Sig)))
}
cat("Forecasts at origin: ",orig,"\n")
print(px[(orig+1):(orig+h),],digits=4)
cat("Standard Errors of predictions: ","\n")
print(se[1:h,],digits=4)
pred=px[(orig+1):(orig+h),]
if(orig < nT){
cat("Observations, predicted values, and errors","\n")
tmp=NULL
jend=min(nT,(orig+h))
for (t in (orig+1):jend){
case=c(t,x[t,],px[t,],x[t,]-px[t,])
tmp=rbind(tmp,case)
}
colnames(tmp) <- c("time",rep("obs",k),rep("fcst",k),rep("err",k))
idx=c(1)
for (j in 1:k){
idx=c(idx,c(0,1,2)*k+j+1)
}
tmp = tmp[,idx]
##print(tmp,digits=3)
print(round(tmp,4))
}
VARfore <- list(pred=pred,se.err=se)
}

"VARMAsim" <- function(nobs,arlags=NULL,malags=NULL,cnst=NULL,phi=NULL,theta=NULL,skip=200,sigma){
# Generate VARMA(p,q) time series using Gaussian innovations.
# p: ar order (lags can be skipped)
# q: ma order (lags can be skipped)
# nobs: sample size
# cnst: constant vector
# phi: store AR coefficient matrices [phi1,phi2,...]
# theta: store MA coefficient matrices [theta1,theta2,...]
# arlags: order for each AR coefficient matrix
# malags: order for each MA coefficient matrix.
#
if(!is.matrix(sigma))sigma=as.matrix(sigma)
k=nrow(sigma)
nT=nobs+skip
at=rmvnorm(nT,rep(0,k),sigma)
nar=length(arlags)
p=0
if(nar > 0){
arlags=sort(arlags)
p=arlags[nar]
}
q=0
nma=length(malags)
if(nma > 0){
malags=sort(malags)
q=malags[nma]
}
ist=max(p,q)+1
zt=matrix(0,nT,k)
if(length(cnst)==0)cnst=rep(0,k)

for (it in ist:nT){
tmp=matrix(at[it,],1,k)
if(nma > 0){
for (j in 1:nma){
jdx=(j-1)*k
thej=theta[,(jdx+1):(jdx+k)]
atm=matrix(at[it-malags[j],],1,k)
tmp=tmp-atm%*%t(thej)
}
}
if(nar > 0){
for (i in 1:nar){
idx=(i-1)*k
phj = phi[,(idx+1):(idx+k)]
ztm=matrix(zt[it-arlags[i],],1,k)
tmp=tmp+ztm%*%t(phj)
}
}
zt[it,]=cnst+tmp
}
# skip the first "skip" points
zt=zt[(1+skip):nT,]
at=at[(1+skip):nT,]

VARMAsim <- list(series=zt,noises=at)
}

"VARirf" <- function(Phi,Sig,lag=12,orth=TRUE){
# Computes impulse response function of a given VAR(p) model.
# Phi: k by kp matrix of AR ceofficients, i.e. [AR1,AR2,AR3, ..., ARp]
# Sig: residual covariance matrix
# Output: (a) Plot and (b) Impulse response function [Psi1,Psi2, ....]
if(!is.matrix(Phi))Phi=as.matrix(Phi)
if(!is.matrix(Sig))Sig=as.matrix(Sig)
# Compute MA representions: This gives impulse response function without considering Sigma.
k=nrow(Phi)
m=ncol(Phi)
p=floor(m/k)
Si=diag(rep(1,k))
wk=c(Si)
## acuwk: accumulated response
awk=c(wk)
acuwk=c(awk)
if(p < 1) p =1
if(lag < 1) lag=1
#
for (i in 1:lag){
if (i <= p){
idx=(i-1)*k
tmp=Phi[,(idx+1):(idx+k)]
}
else{
tmp=matrix(0,k,k)
}
#
jj=i-1
jp=min(jj,p)
if(jp > 0){
for(j in 1:jp){
jdx=(j-1)*k
idx=(i-j)*k
w1=Phi[,(jdx+1):(jdx+k)]
w2=Si[,(idx+1):(idx+k)]
tmp=tmp+w1%*%w2
##print(tmp,digits=4)
}
}
Si=cbind(Si,tmp)
wk=cbind(wk,c(tmp))
awk=awk+c(tmp)
acuwk=cbind(acuwk,awk)
##print(Si,digits=3)
}
# Compute the impulse response of orthogonal innovations
orSi=NULL
wk1=NULL
awk1=NULL
acuwk1=NULL
if(orth){
m1=chol(Sig)
P=t(m1)
wk1=cbind(wk1,c(P))
awk1=wk1
acuwk1=wk1
orSi=cbind(orSi,P)
for(i in 1:lag){
idx=i*k
w1=Si[,(idx+1):(idx+k)]
w2=w1%*%P
orSi=cbind(orSi,w2)
wk1=cbind(wk1,c(w2))
awk1=awk1+c(w2)
acuwk1=cbind(acuwk1,awk1)
}
}

tdx=c(1:(lag+1))-1
par(mfcol=c(k,k),mai=c(0.3,0.3,0.3,0.3))
if(orth){
gmax=max(wk1)
gmin=min(wk1)
cx=(gmax-gmin)/10
gmax=gmax+cx
gmin=gmin-cx
for (j in 1:k^2){
plot(tdx,wk1[j,],type='l',xlab='lag',ylab='IRF',ylim=c(gmin,gmax),cex.axis=0.8)
points(tdx,wk1[j,],pch='*',cex=0.8)
title(main='Orth. innovations')
}
gmax=max(acuwk1)
gmin=min(acuwk1)
cx=(gmax-gmin)/10
gmax=gmax+cx
gmin=gmin-cx
for (j in 1:k^2){
plot(tdx,acuwk1[j,],type='l',xlab='lag',ylab="Acu-IRF",ylim=c(gmin,gmax),cex.axis=0.8)
points(tdx,acuwk1[j,],pch="*",cex=0.8)
title(main='Orth. innovations')
}
}
else{
gmax=max(wk)
gmin=min(wk)
cx=(gmax-gmin)/10
gmax=gmax+cx
gmin=gmin-cx
for(j in 1:k^2){
plot(tdx,wk[j,],type='l',xlab='lag',ylab='IRF',ylim=c(gmin,gmax),cex.axis=0.8)
points(tdx,wk[j,],pch='*',cex=0.8)
title(main="Orig. innovations")
}
gmax=max(acuwk)
gmin=min(acuwk)
cx=(gmax-gmin)/10
gmax=gmax+cx
gmin=gmin-cx
for(j in 1:k^2){
plot(tdx,acuwk[j,],type='l',xlab='lag',ylab='Acu-IRF',ylim=c(gmin,gmax),cex.axis=0.8)
points(tdx,acuwk[j,],pch='*',cex=0.8)
title(main="Orig. innovations")
}
}

VARirf <- list(irf=Si,orthirf=orSi)
}

# Compute multivariate Ljung-Box test statistics
#
# adj: adjustment for the degrees of freedomm in the chi-square distribution.
# adj is the number of coefficient parameters used in the fitted model, if any.
#
# NOTE(review): the enclosing function header is MISSING here -- this body
# appears to belong to the multivariate Q-statistic function (presumably
# something like '"mq" <- function(x,lag=24,adj=0){'). As written the file
# will not parse at the closing brace below. Confirm against the original
# MTS source and restore the header.
if(!is.matrix(x))x=as.matrix(x)
nr=nrow(x)
nc=ncol(x)
g0=var(x)
ginv=solve(g0)
qm=0.0
QM=NULL
df = 0
for (i in 1:lag){
# lag-i cross-covariance (small-sample scaled) and its Ljung-Box term
x1=x[(i+1):nr,]
x2=x[1:(nr-i),]
g = cov(x1,x2)
g = g*(nr-i-1)/(nr-1)
h=t(g)%*%ginv%*%g%*%ginv
qm=qm+nr*nr*sum(diag(h))/(nr-i)
df=df+nc*nc
mindeg=nc^2-1
pv = 1
# NOTE(review): "dff" is used below but never assigned in this fragment;
# a line such as "dff=df-adj" appears to be missing. Confirm and restore.
if(dff > mindeg)pv=1-pchisq(qm,dff)
QM=rbind(QM,c(i,qm,dff,pv))
}
pvs=QM[,4]
dimnames(QM) = list(names(pvs),c("  m  ","    Q(m) ","   df  "," p-value"))
cat("Ljung-Box Statistics: ","\n")
printCoefmat(QM,digits = 3)
#
# plot the p-values with a 5% reference line
par(mfcol=c(1,1))
plot(pvs,ylim=c(0,1),xlab="m",ylab="prob",main="p-values of Ljung-Box statistics")
abline(h=c(0))
lines(rep(0.05,lag),lty=2,col='blue')
}

###
"VMAorder" <- function(x,lag=20){
# Compute multivariate Ljung-Box test statistics
# to identify the VMA order.
#
if(!is.matrix(x))x=as.matrix(x)
nr=dim(x)[1]
nc=dim(x)[2]
g0=var(x)
ginv=solve(g0)
qm=NULL
for (i in 1:lag){
x1=x[(i+1):nr,]
x2=x[1:(nr-i),]
g = cov(x1,x2)
g = g*(nr-i-1)/(nr-1)
h=t(g)%*%ginv%*%g%*%ginv
qmi=nr*nr*sum(diag(h))/(nr-i)
qm=c(qmi,qm)
}
tst=rev(cumsum(qm))
ksq=nc*nc; df=ksq*lag
QM=NULL
for (i in 1:lag){
pv=1-pchisq(tst[i],df)
QM=rbind(QM,c(i,tst[i],pv))
df=df-ksq
}
pvs=QM[,3]
dimnames(QM) = list(names(pvs),c("  j  ","  Q(j,m) "," p-value"))
cat("Q(j,m) Statistics: ","\n")
printCoefmat(QM,digits = 3)
## Plot
par(mfcol=c(1,1))
plot(pvs,ylim=c(0,1),xlab='j',ylab='prob',main="p-values: Q(j,m) Statistics")
abline(h=c(0))
lines(rep(0.05,lag),lty=2,col='blue')
#
}

#### VMA programs
"VMA" <- function(da,q=1,include.mean=T,fixed=NULL,beta=NULL,sebeta=NULL,prelim=F,details=F,thres=2.0){
# Estimation of a vector MA model using conditional MLE (Gaussian dist)
#
# April 18: add subcommand "prelim" to see simplification after the AR approximation.
# When prelim=TRUE, fixed is assigned based on the results of AR approximation.
# Here "thres" is used only when prelim = TRUE.
##
### Create the "mFilter" program to simplify computation of residuals. April 8, 2012.
#
if(!is.matrix(da))da=as.matrix(da)
nT=dim(da)[1]
k=dim(da)[2]
if(q < 1)q=1
kq=k*q
#
THini <- function(y,x,q,include.mean){
# use residuals of a long VAR model to obtain initial estimates of
# VMA coefficients.
if(!is.matrix(y))y=as.matrix(y)
if(!is.matrix(x))x=as.matrix(x)
nT=dim(y)[1]
k=dim(y)[2]
ist=1+q
ne=nT-q
if(include.mean){
xmtx=matrix(1,ne,1)
}
else {
xmtx=NULL
}
ymtx=y[ist:nT,]
for (j in 1:q){
xmtx=cbind(xmtx,x[(ist-j):(nT-j),])
}
xtx=crossprod(xmtx,xmtx)
xty=crossprod(xmtx,ymtx)
xtxinv=solve(xtx)
beta=xtxinv%*%xty
resi= ymtx - xmtx%*%beta
sse=crossprod(resi,resi)/ne
dd=diag(xtxinv)
sebeta=NULL
for (j in 1:k){
se=sqrt(dd*sse[j,j])
sebeta=cbind(sebeta,se)
}
THini <- list(estimates=beta,se=sebeta)
}

if(length(fixed) < 1){
m1=VARorder(da,q+12,output=FALSE)
porder=m1\$aicor
if(porder < 1)porder=1
m2=VAR(da,porder,output=FALSE)
y=da[(porder+1):nT,]
x=m2\$residuals
m3=THini(y,x,q,include.mean)
beta=m3\$estimates
sebeta=m3\$se
nr=dim(beta)[1]
### Preliminary simplification
if(prelim){
fixed = matrix(0,nr,k)
for (j in 1:k){
tt=beta[,j]/sebeta[,j]
idx=c(1:nr)[abs(tt) >= thres]
fixed[idx,j]=1
}
}
#
if(length(fixed) < 1){fixed=matrix(1,nr,k)}
}
else{
nr=dim(beta)[1]
}
#
par=NULL
separ=NULL
fix1=fixed
#
#
VMAcnt=0
ist=0
if(include.mean){
jdx=c(1:k)[fix1[1,]==1]
VMAcnt=length(jdx)
if(VMAcnt > 0){
par=beta[1,jdx]
separ=sebeta[1,jdx]
}
TH=-beta[2:(kq+1),]
seTH=sebeta[2:(kq+1),]
ist=1
}
else {
TH=-beta
seTH=sebeta
}
#########
for (j in 1:k){
idx=c(1:(nr-ist))[fix1[(ist+1):nr,j]==1]
if(length(idx) > 0){
par=c(par,TH[idx,j])
separ=c(separ,seTH[idx,j])
}
}
#
ParMA <- par

LLKvma <- function(par,zt=zt,q=q,fixed=fix1,include.mean=include.mean){
## the model used is
## x_t' = mu' + a_t' -a_{t-1}'theta_1' - a_{t-2}'theta_2' - ...
## a_t' = x_t' - mu' + a_{t-1}'theta_1'+a_{t-2}'theta_2' + ....
k=ncol(zt)
nT=nrow(zt)
mu=rep(0,k)
icnt=0; VMAcnt <- 0
fix <- fixed
#
iist=0
if(include.mean){
iist=1
jdx=c(1:k)[fix[1,]==1]
icnt=length(jdx); VMAcnt <- icnt
if(icnt > 0)
mu[jdx]=par[1:icnt]
}
### remove the mean
for (j in 1:k){
zt[,j]=zt[,j]-mu[j]
}
## recursively compute the residual series: at
kq=k*q
Theta=matrix(0,kq,k)
for (j in 1:k){
idx=c(1:kq)[fix[(iist+1):(iist+kq),j]==1]
jcnt=length(idx)
if(jcnt > 0){
Theta[idx,j]=par[(icnt+1):(icnt+jcnt)]
icnt=icnt+jcnt
}
}
# Theta = rbind[theta_1',theta_2', ..., theta_q']
### Checking the invertibility of t(Theta)
TH=t(Theta)
if(q > 1){
tmp=cbind(diag(rep(1,(q-1)*k)),matrix(0,(q-1)*k,k))
TH=rbind(TH,tmp)
}
mm=eigen(TH)
V1=mm\$values
P1=mm\$vectors
v1=Mod(V1)
ich=0
for (i in 1:kq){
if(v1[i] > 1)V1[i]=1/V1[i]
ich=1
}
if(ich > 0){
P1i=solve(P1)
GG=diag(V1)
TH=Re(P1%*%GG%*%P1i)
Theta=t(TH[1:k,])
##print(TH[1:k,])
ist=0
if(VMAcnt > 0)ist=1
for (j in 1:k){
idx=c(1:kq)[fix[(ist+1):(ist+kq),j]==1]
jcnt=length(idx)
if(jcnt > 0){
par[(icnt+1):(icnt+jcnt)]=TH[j,idx]
icnt=icnt+jcnt
}
}
##
}
##
at=mFilter(zt,t(Theta))
#
sig=t(at)%*%at/nT
##ll=dmnorm(at,rep(0,k),sig)
ll=dmvnorm(at,rep(0,k),sig)
LLKvma=-sum(log(ll))
LLKvma
}
#
cat("Number of parameters: ",length(par),"\n")
cat("initial estimates: ",round(par,4),"\n")
### Set up lower and upper bounds
lowerBounds=par; upperBounds=par
npar=length(par)
mult=2.0
if((npar > 10)||(q > 2))mult=1.2
for (j in 1:npar){
lowerBounds[j] = par[j]-mult*separ[j]
upperBounds[j] = par[j]+mult*separ[j]
}
cat("Par. Lower-bounds: ",round(lowerBounds,4),"\n")
cat("Par. Upper-bounds: ",round(upperBounds,4),"\n")
###mm=optim(par,LLKvma,method=c("L-BFGS-B"),lower=lowerBounds,upper=upperBounds,hessian=TRUE)
###mm=optim(par,LLKvma,method=c("BFGS"),hessian=TRUE)
##est=mm\$par
##H=mm\$hessian
# Step 5: Estimate Parameters and Compute Numerically Hessian:
if(details){
fit = nlminb(start = ParMA, objective = LLKvma,zt=da,fixed=fixed,include.mean=include.mean,q=q,
lower = lowerBounds, upper = upperBounds, control = list(trace=3))
}
else {
fit = nlminb(start = ParMA, objective = LLKvma, zt=da, fixed=fixed, include.mean=include.mean, q=q,
lower = lowerBounds, upper = upperBounds)
}
epsilon = 0.0001 * fit\$par
npar=length(par)
Hessian = matrix(0, ncol = npar, nrow = npar)
for (i in 1:npar) {
for (j in 1:npar) {
x1 = x2 = x3 = x4  = fit\$par
x1[i] = x1[i] + epsilon[i]; x1[j] = x1[j] + epsilon[j]
x2[i] = x2[i] + epsilon[i]; x2[j] = x2[j] - epsilon[j]
x3[i] = x3[i] - epsilon[i]; x3[j] = x3[j] + epsilon[j]
x4[i] = x4[i] - epsilon[i]; x4[j] = x4[j] - epsilon[j]
Hessian[i, j] = (LLKvma(x1,zt=da,q=q,fixed=fixed,include.mean=include.mean)
-LLKvma(x2,zt=da,q=q,fixed=fixed,include.mean=include.mean)
-LLKvma(x3,zt=da,q=q,fixed=fixed,include.mean=include.mean)
+LLKvma(x4,zt=da,q=q,fixed=fixed,include.mean=include.mean))/
(4*epsilon[i]*epsilon[j])
}
}
est=fit\$par
cat("Final   Estimates: ",est,"\n")
# Step 6: Create and Print Summary Report:
se.coef = sqrt(diag(solve(Hessian)))
tval = fit\$par/se.coef
matcoef = cbind(fit\$par, se.coef, tval, 2*(1-pnorm(abs(tval))))
dimnames(matcoef) = list(names(tval), c(" Estimate",
" Std. Error", " t value", "Pr(>|t|)"))
cat("\nCoefficient(s):\n")
printCoefmat(matcoef, digits = 4, signif.stars = TRUE)
#
### recover to the format of unconstrained case for printing purpose.
cat("---","\n")
cat("Estimates in matrix form:","\n")
icnt=0
ist=0
cnt=NULL
if(include.mean){
ist=1
cnt=rep(0,k)
secnt=rep(1,k)
jdx=c(1:k)[fix1[1,]==1]
icnt=length(jdx)
if(icnt > 0){
cnt[jdx]=est[1:icnt]
secnt[jdx]=se.coef[1:icnt]
cat("Constant term: ","\n")
cat("Estimates: ",cnt,"\n")
}
}
cat("MA coefficient matrix","\n")
TH=matrix(0,kq,k)
seTH=matrix(1,kq,k)
for (j in 1:k){
idx=c(1:kq)[fix1[(ist+1):nr,j]==1]
jcnt=length(idx)
if(jcnt > 0){
TH[idx,j]=est[(icnt+1):(icnt+jcnt)]
seTH[idx,j]=se.coef[(icnt+1):(icnt+jcnt)]
icnt=icnt+jcnt
}
}
icnt=0
for (i in 1:q){
cat("MA(",i,")-matrix","\n")
theta=t(TH[(icnt+1):(icnt+k),])
print(theta,digits=3)
icnt=icnt+k
}
## Compute the residuals
zt=da
if(include.mean){
for (i in 1:k){
zt[,i]=zt[,i]-cnt[i]
}
}
### Use mFilter to compute residuals (April 18, 2012)
at=mFilter(zt,t(TH))
sig=t(at)%*%at/nT
cat(" ","\n")
cat("Residuals cov-matrix:","\n")
print(sig)
dd=det(sig)
d1=log(dd)
aic=d1+2*npar/nT
bic=d1+log(nT)*npar/nT
cat("----","\n")
cat("aic= ",aic,"\n")
cat("bic= ",bic,"\n")
### prepare for output storage
Theta=t(TH)
if(include.mean){
TH=rbind(cnt,TH)
seTH=rbind(secnt,seTH)
}

VMA <- list(data=da,MAorder=q,cnst=include.mean,coef=TH,secoef=seTH,residuals=at,Sigma=sig,Theta=Theta,mu=cnt,aic=aic,bic=bic)
}

####
"refVMA" <- function(model,thres=1.0){
# Refine a fitted VMA model by fixing to zero all coefficients whose
# absolute t-ratio is below `thres`, then re-estimating via VMA.
#
# model: output object from VMA
# thres: threshold value for |t-ratio|
#
# Returns a list in the same format as VMA output, plus the 0/1
# constraint matrix `fixed` that was used.
#
x = model$data
q = model$MAorder
cnst = model$cnst
coef=as.matrix(model$coef)
secoef=as.matrix(model$secoef)
nr=dim(coef)[1]
nc=dim(coef)[2]
## Guard against missing standard errors: replace NA s.e. by a small
## positive value so the t-ratios below are well defined.
## (Vectorized; the original per-column loop compared `idx==T`, relying on
## the reassignable `T` shortcut.)
secoef[is.na(secoef)]=0.01
## Keep (fix[i,j]=1) only the parameters with |t-ratio| >= thres.
fix=matrix(0,nr,nc)
for (j in 1:nc){
tt=coef[,j]/secoef[,j]
idx=c(1:nr)[abs(tt) >= thres]
fix[idx,j]=1
}
### Try to keep the constant if the t-ratio is greater than 1.
if(cnst){
tt=coef[1,]/secoef[1,]
idx=c(1:nc)[abs(tt) > 1.0]
if(length(idx) > 0)fix[1,idx]=1
}

mm=VMA(x,q=q,include.mean=cnst,fixed=fix,beta=coef,sebeta=secoef)

refVMA <- list(data=x,MAorder=q,cnst=cnst,coef=mm$coef,secoef=mm$secoef,residuals=mm$residuals,Sigma=mm$Sigma,aic=mm$aic,bic=mm$bic,mu=mm$mu,Theta=mm$Theta,fixed=fix)
}

####
"VMAs" <- function(da,malags,include.mean=T,fixed=NULL,prelim=F,details=F,thres=2.0){
# Estimation of a vector MA model using conditional MLE (Gaussian dist)
# The MA lags are given specifically.
#
# da:           data matrix (nT by k)
# malags:       vector of MA lags to include, e.g. c(1,3)
# include.mean: include a constant (mean) vector?
# fixed:        0/1 matrix; 1 = parameter is estimated, 0 = fixed to zero
# prelim:       if TRUE, build `fixed` by screening initial t-ratios
# details:      if TRUE, trace the nlminb optimization
# thres:        |t-ratio| threshold used when prelim = TRUE
#
# Returns a list with the data, estimates (coef/secoef), residuals,
# residual covariance (Sigma), MA matrices (Theta), mean (mu), aic/bic.
#
if(!is.matrix(da))da=as.matrix(da)
nT <- dim(da)[1]; k <- dim(da)[2]
nlags=length(malags)
if(nlags < 1){
malags=c(1)
nlags=1
}
MAlag <- sort(malags)
kq=k*nlags
# find the maximum MA order
q=MAlag[nlags]
#
THinis <- function(y,x,MAlag,include.mean){
# use residuals of a long VAR model to obtain initial estimates of
# VMA coefficients.
if(!is.matrix(y))y=as.matrix(y)
if(!is.matrix(x))x=as.matrix(x)
nT=dim(y)[1]
k=dim(y)[2]
nlags=length(MAlag)
q=MAlag[nlags]
ist=1+q
ne=nT-q
if(include.mean){
xmtx=matrix(1,ne,1)
}
else {
xmtx=NULL
}
ymtx=y[ist:nT,]
for (j in 1:nlags){
jj=MAlag[j]
xmtx=cbind(xmtx,x[(ist-jj):(nT-jj),])
}
xtx=crossprod(xmtx,xmtx)
xty=crossprod(xmtx,ymtx)
xtxinv=solve(xtx)
beta=xtxinv%*%xty
# compute standard errors
resi=ymtx - xmtx%*%beta
sse=crossprod(resi,resi)/ne
dd=diag(xtxinv)
sebeta=NULL
for (j in 1:k){
se=sqrt(dd*sse[j,j])
sebeta=cbind(sebeta,se)
}
THinis <- list(estimates=beta, se=sebeta)
}

# Obtain initial parameter estimates
### Use VAR approximation to obtain initial parameter estimates
m1=VARorder(da,q+10,output=FALSE)
porder=m1$aicor
m2=VAR(da,porder,output=FALSE)
y=da[(porder+1):nT,]
x=m2$residuals
m3=THinis(y,x,MAlag,include.mean)
beta=m3$estimates
sebeta=m3$se
nr=dim(beta)[1]
#
## Preliminary screening: keep only parameters with |t| >= thres.
if(prelim){
fixed=matrix(0,nr,k)
for (j in 1:k){
tt=beta[,j]/sebeta[,j]
idx=c(1:nr)[abs(tt) >= thres]
fixed[idx,j]=1
}
}
####
if(length(fixed)==0){fixed=matrix(1,nr,k)}
#
## Collect the free parameters (and their rough s.e.) into the vector `par`.
par=NULL
separ=NULL
ist=0
if(include.mean){
jdx=c(1:k)[fixed[1,]==1]
if(length(jdx) > 0){
par=beta[1,jdx]
separ=sebeta[1,jdx]
}
TH=-beta[2:(kq+1),]
seTH=sebeta[2:(kq+1),]
ist=1
}
else {
TH=-beta
seTH=sebeta
}
#####
for (j in 1:k){
idx=c(1:(nr-ist))[fixed[(ist+1):nr,j]==1]
if(length(idx)>0){
par=c(par,TH[idx,j])
separ=c(separ,seTH[idx,j])
}
}
cat("Initial estimates: ",round(par,4),"\n")
### Set up lower and upper bounds
lowerBounds=par; upperBounds=par
for (j in 1:length(par)){
lowerBounds[j] = par[j]-2*separ[j]
upperBounds[j] = par[j]+2*separ[j]
}
cat("Par. lower-bounds: ",round(lowerBounds,4),"\n")
cat("Par. upper-bounds: ",round(upperBounds,4),"\n")
###
LLKvmas <- function(par,zt=da, include.mean=include.mean, MAlag=MAlag, fixed=fixed){
## Negative Gaussian log-likelihood of the VMA model at `par`.
## the model used is
## x_t' = mu' + a_t' -a_{t-1}'theta_1' - a_{t-2}'theta_2' - ...
## a_t' = x_t' - mu' + a_{t-1}'theta_1'+a_{t-2}'theta_2' + ....
k=ncol(zt)
nT=nrow(zt)
nlags=length(MAlag)
q=MAlag[nlags]
fix <- fixed
#
mu=rep(0,k)
ist=0
icnt=0; VMAcnt <- 0
if(include.mean){
ist=1
jdx=c(1:k)[fix[1,]==1]
icnt=length(jdx); VMAcnt <- icnt
mu[jdx]=par[1:icnt]
### remove the mean
for (j in 1:k){
zt[,j]=zt[,j]-mu[j]
}
}
## recursively compute the residual series: at
kq=k*nlags
Theta=matrix(0,kq,k)
for (j in 1:k){
idx=c(1:kq)[fix[(ist+1):(ist+kq),j]==1]
jcnt=length(idx)
if(jcnt > 0){
Theta[idx,j]=par[(icnt+1):(icnt+jcnt)]
icnt=icnt+jcnt
}
}

# Theta = rbind[theta_1',theta_2', ..., theta_q']
at=zt[1:MAlag[1],]
if(MAlag[1]==1)at=matrix(at,1,k)
if(q >= (MAlag[1]+1)){
for(t in (MAlag[1]+1):q){
Past=NULL
for (ii in 1:nlags){
jj=MAlag[ii]
if((t-jj) > 0){
Past=c(Past,at[t-jj,])
}
else {
Past=c(Past,rep(0,k))
}
}
tmp=zt[t,]+matrix(Past,1,kq)%*%Theta
at=rbind(at,tmp)
}
#end of if(q >= (MAlag[1]+1)) statement
}
for(t in (q+1):nT){
Past=NULL
for (ii in 1:nlags){
jj=MAlag[ii]
Past=c(Past,at[t-jj,])
}
tmp=zt[t,]+matrix(Past,1,kq)%*%Theta
at=rbind(at,tmp)
}
#
sig=t(at)%*%at/nT
##ll=dmnorm(at,rep(0,k),sig)
ll=dmvnorm(at,rep(0,k),sig)
LLKvmas=-sum(log(ll))
LLKvmas
}

# Step 5: Estimate Parameters and Compute Numerically Hessian:
if(details){
fit = nlminb(start = par, objective = LLKvmas, zt=da, include.mean=include.mean, MAlag=MAlag, fixed=fixed,
lower = lowerBounds, upper = upperBounds, control = list(trace=3))
}
else {
fit = nlminb(start = par, objective = LLKvmas, zt=da, include.mean=include.mean, MAlag=MAlag, fixed=fixed,
lower = lowerBounds, upper = upperBounds)
}
## Numerical Hessian of the negative log-likelihood by central differences.
epsilon = 0.0001 * fit$par
npar=length(par)
Hessian = matrix(0, ncol = npar, nrow = npar)
for (i in 1:npar) {
for (j in 1:npar) {
x1 = x2 = x3 = x4  = fit$par
x1[i] = x1[i] + epsilon[i]; x1[j] = x1[j] + epsilon[j]
x2[i] = x2[i] + epsilon[i]; x2[j] = x2[j] - epsilon[j]
x3[i] = x3[i] - epsilon[i]; x3[j] = x3[j] + epsilon[j]
x4[i] = x4[i] - epsilon[i]; x4[j] = x4[j] - epsilon[j]
Hessian[i, j] = (LLKvmas(x1,zt=da,include.mean=include.mean,MAlag=MAlag,fixed=fixed)
-LLKvmas(x2,zt=da,include.mean=include.mean,MAlag=MAlag,fixed=fixed)
-LLKvmas(x3,zt=da,include.mean=include.mean,MAlag=MAlag,fixed=fixed)
+LLKvmas(x4,zt=da,include.mean=include.mean,MAlag=MAlag,fixed=fixed))/
(4*epsilon[i]*epsilon[j])
}
}
est=fit$par
cat("Final    Estimates: ",est,"\n")
# Step 6: Create and Print Summary Report:
se.coef = sqrt(diag(solve(Hessian)))
tval = fit$par/se.coef
matcoef = cbind(fit$par, se.coef, tval, 2*(1-pnorm(abs(tval))))
dimnames(matcoef) = list(names(tval), c(" Estimate",
" Std. Error", " t value", "Pr(>|t|)"))
cat("\nCoefficient(s):\n")
printCoefmat(matcoef, digits = 4, signif.stars = TRUE)

cat("---","\n")
cat("Estimates in matrix form:","\n")
icnt=0
ist=0
cnt=rep(0,k)
secnt=rep(1,k)
# handle the mean,if any.
if(include.mean){
ist=1
jdx=c(1:k)[fixed[1,]==1]
icnt=length(jdx)
if(icnt > 0){
cnt[jdx]=est[1:icnt]
## Bug fix: assign the standard errors at the estimated positions only.
## The original `secnt=se.coef[1:icnt]` shrank secnt to length icnt,
## corrupting the rbind(secnt,seTH) below whenever some constants are
## fixed to zero; cf. the analogous `secnt[jdx]=` code in VMA.
secnt[jdx]=se.coef[1:icnt]
cat("Constant term: ","\n")
cat("Estimates: ",cnt,"\n")
}
}
cat("MA coefficient matrix","\n")
TH=matrix(0,kq,k)
seTH=matrix(1,kq,k)
for (j in 1:k){
idx=c(1:kq)[fixed[(ist+1):nr,j]==1]
jcnt=length(idx)
if(jcnt > 0){
TH[idx,j]=est[(icnt+1):(icnt+jcnt)]
seTH[idx,j]=se.coef[(icnt+1):(icnt+jcnt)]
icnt=icnt+jcnt
}
}
#
####print(TH)
#
icnt=0
for (i in 1:nlags){
ii=MAlag[i]
cat("MA(",ii,")-matrix","\n")
theta=t(TH[(icnt+1):(icnt+k),])
print(theta,digits=3)
icnt=icnt+k
}
## Compute the residuals (same recursion as in LLKvmas, at the final
## estimates, with the estimated mean removed first).
zt=da
if(include.mean){
for (i in 1:k){
zt[,i]=zt[,i]-cnt[i]
}
}
at=zt[1:MAlag[1],]
if(MAlag[1]==1)at=matrix(at,1,k)
if(q >= (MAlag[1]+1)){
for(t in (MAlag[1]+1):q){
Past=NULL
for (ii in 1:nlags){
jj=MAlag[ii]
if((t-jj) > 0){
Past=c(Past,at[t-jj,])
}
else {
Past=c(Past,rep(0,k))
}
}
tmp=zt[t,]+matrix(Past,1,kq)%*%TH
at=rbind(at,tmp)
}
#end of the statement if(q > (MAlag[1]+1)
}
for(t in (q+1):nT){
Past=NULL
for (ii in 1:nlags){
jj=MAlag[ii]
Past=c(Past,at[t-jj,])
}
tmp=zt[t,]+matrix(Past,1,kq)%*%TH
at=rbind(at,tmp)
}
#
sig=t(at)%*%at/nT
cat(" ","\n")
cat("Residuals cov-matrix:","\n")
print(sig)
dd=det(sig)
d1=log(dd)
aic=d1+2*npar/nT
bic=d1+log(nT)*npar/nT
cat("---","\n")
cat("aic = ",aic,"\n")
cat("bic = ",bic,"\n")
###
Theta=t(TH)
if(include.mean){
TH=rbind(cnt,TH)
seTH=rbind(secnt,seTH)
}

VMAs <- list(data=da,MAlags=MAlag,cnst=include.mean,coef=TH,secoef=seTH,residuals=at,aic=aic,bic=bic,Sigma=sig,Theta=Theta,mu=cnt,MAorder=q,fixed=fixed)
}

####
"refVMAs" <- function(model,thres=2){
# Refine a fitted VMAs model: fix to zero every coefficient whose
# absolute t-ratio is below `thres`, then re-estimate with VMAs.
#
# model: an output object from the VMAs program
# thres: threshold for |t-ratio|
#
x <- model$data
malags <- model$MAlags
cnst <- model$cnst
coef <- model$coef
secoef <- model$secoef
nr <- dim(coef)[1]
nc <- dim(coef)[2]
## Replace (near-)zero standard errors by 1 so the t-ratios below are
## well defined.
for (i in 1:nr){
for (j in 1:nc){
if(secoef[i,j] < 10^(-8)) secoef[i,j] <- 1.0
}
}
k <- dim(x)[2]
nr <- dim(coef)[1]
nc <- dim(coef)[2]
## Mark (fix = 1) the parameters that survive the |t| >= thres screen.
fix <- matrix(0,nr,k)
for (j in 1:k){
tratio <- coef[,j]/secoef[,j]
keep <- c(1:nr)[abs(tratio) >= thres]
if(length(keep) > 0) fix[keep,j] <- 1
}
### Keep the constant term whenever its t-ratio exceeds 1.
if(cnst){
tratio <- coef[1,]/secoef[1,]
keep <- c(1:nc)[abs(tratio) > 1.0]
if(length(keep) > 0) fix[1,keep] <- 1
}

mm <- VMAs(x,malags,include.mean=cnst,fixed=fix)

refVMAs <- list(data=x,MAlags=malags,cnst=cnst,coef=mm$coef,secoef=mm$secoef,residuals=mm$residuals,aic=mm$aic,bic=mm$bic,Sigma=mm$Sigma,Theta=mm$Theta,mu=mm$mu,MAorder=mm$MAorder,fixed=fix)
}

#####
"VMApred" <- function(model,h=1,orig=0){
# Computes the i=1, 2, ..., h-step ahead predictions of a VMA(q) model
# together with the standard errors of the forecast errors.
#
# model: a VMA output object (uses data, residuals, Theta, Sigma, mu,
#        MAorder).
# h:     maximum forecast horizon.
# orig:  forecast origin; values <= 0 or beyond the sample default to
#        the last observation.
#
# Returns list(pred, se.err).
#
x=model$data
resi=model$residuals
Theta=model$Theta
sig=model$Sigma
mu=model$mu
q=model$MAorder
# psi-weights of a VMA model are the negatives of the MA matrices.
psi=-Theta
#
nT=dim(x)[1]
k=dim(x)[2]
if(orig <= 0)orig=nT
## Bug fix: the original compared `orig > T` (i.e. against TRUE == 1),
## which silently reset any origin greater than 1 to the sample end.
if(orig > nT)orig=nT
if(length(mu) < 1)mu=rep(0,k)
if(q > orig){
cat("Too few data points to produce forecasts","\n")
}
pred=NULL
se=NULL
px=as.matrix(x[1:orig,])
for (j in 1:h){
fcst=mu
t=orig+j
## Only residuals observed at or before the origin contribute to the
## j-step forecast; beyond lag q (or past the origin) terms vanish.
for (i in 1:q){
jdx=(i-1)*k
t1=t-i
if(t1 <= orig){
theta=Theta[,(jdx+1):(jdx+k)]
fcst=fcst-matrix(resi[t1,],1,k)%*%t(theta)
}
}
px=rbind(px,fcst)
#
## Forecast-error covariance: Sigma + sum_i psi_i Sigma psi_i'.
Sig=sig
if (j > 1){
jj=min(q,(j-1))
for (ii in 1:jj){
idx=(ii-1)*k
wk=psi[,(idx+1):(idx+k)]
Sig=Sig+wk%*%sig%*%t(wk)
}
}
se=rbind(se,sqrt(diag(Sig)))
}
cat("Forecasts at origin: ",orig,"\n")
print(px[(orig+1):(orig+h),],digits=4)
cat("Standard Errors of predictions: ","\n")
print(se[1:h,],digits=4)
pred=px[(orig+1):(orig+h),]
## When forecasting from inside the sample, also show forecasts against
## the actual observations and the forecast errors.
if(orig < nT){
cat("Observations, predicted values, and errors","\n")
tmp=NULL
jend=min(nT,(orig+h))
for (t in (orig+1):jend){
case=c(t,x[t,],px[t,],x[t,]-px[t,])
tmp=rbind(tmp,case)
}
colnames(tmp) <- c("time",rep("obs",k),rep("fcst",k),rep("err",k))
idx=c(1)
for (j in 1:k){
idx=c(idx,c(0,1,2)*k+j+1)
}
tmp = tmp[,idx]
print(tmp,digits=3)
}

VMApred <- list(pred=pred,se.err=se)
}

####
"VARMA" <- function(da,p=0,q=0,include.mean=T,fixed=NULL,beta=NULL,sebeta=NULL,prelim=F,details=F,thres=2.0){
# Estimation of a vector ARMA model using conditional MLE (Gaussian dist)
#
# When prelim=TRUE, fixed is assigned based on the results of AR approximation.
# Here "thres" is used only when prelim = TRUE.
#
# da:           data matrix (nT by k)
# p, q:         AR and MA orders; negatives are reset to 0 and p is forced
#               to 1 when p + q < 1
# include.mean: include a constant vector (Ph0)?
# fixed:        0/1 matrix; 1 = estimate the parameter, 0 = fix it to zero.
#               When supplied, the caller's `beta`/`sebeta` are used as the
#               initial estimates and their rough standard errors.
# details:      if TRUE, trace the nlminb optimization
# thres:        |t-ratio| screening threshold, used only when prelim = TRUE
#
# Returns a list with the data, estimates (coef/secoef), residuals, Sigma,
# aic/bic, and the AR/MA coefficient matrices Phi / Theta plus Ph0.
#
if(!is.matrix(da))da=as.matrix(da)
nT=dim(da)[1];   k=dim(da)[2]
# basic setup.
if(p < 0)p=0
if(q < 0)q=0
if((p+q) < 1)p=1
pqmax=max(p,q)
kq=k*q
kp=k*p
#
iniEST <- function(y,x,p,q,include.mean){
# use residuals of a long VAR model to obtain initial estimates of
# VARMA coefficients.
if(!is.matrix(y))y=as.matrix(y)
if(!is.matrix(x))x=as.matrix(x)
nT=dim(y)[1]
k=dim(y)[2]
pq=max(p,q)
ist=1+pq
ne=nT-pq
if(include.mean){
xmtx=matrix(1,ne,1)
}
else {
xmtx=NULL
}
ymtx=as.matrix(y[ist:nT,])
if(p > 0){
for (j in 1:p){
xmtx=cbind(xmtx,y[(ist-j):(nT-j),])
}
}
if(q > 0){
for (j in 1:q){
xmtx=cbind(xmtx,x[(ist-j):(nT-j),])
}
}
# least squares on [const | lagged y | lagged VAR residuals]
xmtx=as.matrix(xmtx)
xtx=crossprod(xmtx,xmtx)
xty=crossprod(xmtx,ymtx)
xtxinv=solve(xtx)
beta=xtxinv%*%xty
resi= ymtx - xmtx%*%beta
sse=crossprod(resi,resi)/ne
dd=diag(xtxinv)
sebeta=NULL
for (j in 1:k){
se=sqrt(dd*sse[j,j])
sebeta=cbind(sebeta,se)
}
iniEST <- list(estimates=beta,se=sebeta)
}

## When no `fixed` matrix is supplied, obtain initial estimates from a VAR
## approximation (order chosen by AIC via VARorder) and optionally screen
## them with the preliminary t-ratio rule.
if(length(fixed) < 1){
m1=VARorder(da,p+q+9,output=FALSE)
porder=m1$aicor
if(porder < 1)porder=1
m2=VAR(da,porder,output=FALSE)
y=da[(porder+1):nT,]
x=m2$residuals
m3=iniEST(y,x,p,q,include.mean)
beta=m3$estimates
sebeta=m3$se
nr=dim(beta)[1]
### Preliminary simplification
if(prelim){
fixed = matrix(0,nr,k)
for (j in 1:k){
tt=beta[,j]/sebeta[,j]
idx=c(1:nr)[abs(tt) >= thres]
fixed[idx,j]=1
}
}
if(length(fixed)==0){fixed=matrix(1,nr,k)}
}
# Identify parameters to be estimated.
par=NULL
separ=NULL
ist=0
if(include.mean){
jdx=c(1:k)[fixed[1,]==1]
if(length(jdx) > 0){
par=beta[1,jdx]
separ=sebeta[1,jdx]
}
ist=1
}
if(p > 0){
for (j in 1:k){
idx=c(1:kp)[fixed[(ist+1):(ist+kp),j]==1]
if(length(idx) > 0){
tmp=beta[(ist+1):(ist+kp),j]
setmp=sebeta[(ist+1):(ist+kp),j]
par=c(par,tmp[idx])
separ=c(separ,setmp[idx])
}
#end of j-loop
}
ist=ist+kp
}
#
if(q > 0){
for (j in 1:k){
idx=c(1:kq)[fixed[(ist+1):(ist+kq),j]==1]
if(length(idx) > 0){
tmp=beta[(ist+1):(ist+kq),j]
setmp=sebeta[(ist+1):(ist+kq),j]
par=c(par,tmp[idx])
separ=c(separ,setmp[idx])
}
}
}
#########
cat("Number of parameters: ",length(par),"\n")
cat("initial estimates: ",round(par,4),"\n")
### Set up lower and upper bounds
## box constraints: initial estimate +/- 2 rough standard errors
lowerBounds=par; upperBounds=par
for (j in 1:length(par)){
lowerBounds[j] = par[j]-2*separ[j]
upperBounds[j] = par[j]+2*separ[j]
}
cat("Par. lower-bounds: ",round(lowerBounds,4),"\n")
cat("Par. upper-bounds: ",round(upperBounds,4),"\n")
#### likelihood function
## Negative Gaussian log-likelihood of the VARMA model evaluated at `par`;
## residuals are computed recursively, conditioning on the first max(p,q)
## observations (which are discarded before forming Sigma).
LLKvarma <- function(par,zt=da,p=p,q=q,include.mean=include.mean,fixed=fixed){
##
nT=dim(zt)[1]; k=dim(zt)[2]
pqmax=max(p,q)
kp=k*p
kq=k*q
###  Assign parameters to their proper locations in the program.
beta=NULL
ist=0
icnt=0
Ph0=rep(0,k)
if(include.mean){
idx=c(1:k)[fixed[1,]==1]
icnt=length(idx)
if(icnt > 0){
Ph0[idx]=par[1:icnt]
}
ist=1
beta=rbind(beta,Ph0)
}
PH=NULL
if(p > 0){
PH = matrix(0,kp,k)
for (j in 1:k){
idx=c(1:kp)[fixed[(ist+1):(ist+kp),j]==1]
jdx=length(idx)
if(jdx > 0){
PH[idx,j]=par[(icnt+1):(icnt+jdx)]
icnt=icnt+jdx
}
# end of j-loop
}
ist=ist+kp
beta=rbind(beta,PH)
#end of if (p > 0)
}
#
TH=NULL
if(q > 0){
TH=matrix(0,kq,k)
for (j in 1:k){
idx=c(1:kq)[fixed[(ist+1):(ist+kq),j]==1]
jdx=length(idx)
if(jdx > 0){
TH[idx,j]=par[(icnt+1):(icnt+jdx)]
icnt=icnt+jdx
}
# end of j-loop
}
beta=rbind(beta,TH)
# end of if(q > 0).
}
### recursively compute the residuals
istart=pqmax+1
#### consider the case t from 1 to pqmatx
## start-up period: lags falling before t=1 are simply dropped
at=matrix((zt[1,]-Ph0),1,k)
if(pqmax > 1){
for (t in 2:pqmax){
tmp=matrix((zt[t,]-Ph0),1,k)
if(p > 0){
for (j in 1:p){
if((t-j) > 0){
jdx=(j-1)*k
tmp1=matrix(zt[(t-j),],1,k)%*%as.matrix(PH[(jdx+1):(jdx+k),])
tmp=tmp-tmp1
}
# end of j-loop
}
# end of if(p > 0) statement
}
#
if(q > 0){
for (j in 1:q){
jdx=(j-1)*k
if((t-j)>0){
tmp2=matrix(at[(t-j),],1,k)%*%as.matrix(TH[(jdx+1):(jdx+k),])
tmp=tmp-tmp2
}
#end of j-loop
}
#end of if(q > 0) statement
}
at=rbind(at,tmp)
}
# end of if(pqmax > 1) statement
}
### for t from ist on
Pcnt = NULL
idim=kp+kq
if(include.mean){
Pcnt=c(1)
idim=idim+1
}
for (t in istart:nT){
Past=NULL
if(p > 0){
for (j in 1:p){
Past=c(Past,zt[(t-j),])
}
}
if(q > 0){
for (j in 1:q){
Past=c(Past,at[(t-j),])
}
}
tmp = matrix(c(Pcnt,Past),1,idim)%*%beta
tmp3=zt[t,]-tmp
at=rbind(at,tmp3)
}
#### skip the first max(p,q) residuals.
at=at[(istart:nT),]
sig=t(at)%*%at/(nT-pqmax)
#ll=dmnorm(at,rep(0,k),sig)
ll=dmvnorm(at,rep(0,k),sig)
LLKvarma=-sum(log(ll))
LLKvarma
}

# Step 5: Estimate Parameters and Compute Numerically Hessian:
if(details){
fit = nlminb(start = par, objective = LLKvarma,zt=da,p=p,q=q,include.mean=include.mean,fixed=fixed,
lower = lowerBounds, upper = upperBounds, control = list(trace=3))
}
else {
fit = nlminb(start = par, objective = LLKvarma, zt=da,p=p,q=q,include.mean=include.mean,fixed=fixed,
lower = lowerBounds, upper = upperBounds)
}
## Numerical Hessian of the negative log-likelihood by central differences.
epsilon = 0.0001 * fit$par
npar=length(par)
Hessian = matrix(0, ncol = npar, nrow = npar)
for (i in 1:npar) {
for (j in 1:npar) {
x1 = x2 = x3 = x4  = fit$par
x1[i] = x1[i] + epsilon[i]; x1[j] = x1[j] + epsilon[j]
x2[i] = x2[i] + epsilon[i]; x2[j] = x2[j] - epsilon[j]
x3[i] = x3[i] - epsilon[i]; x3[j] = x3[j] + epsilon[j]
x4[i] = x4[i] - epsilon[i]; x4[j] = x4[j] - epsilon[j]
Hessian[i, j] = (LLKvarma(x1,zt=da,p=p,q=q,include.mean=include.mean,fixed=fixed)
-LLKvarma(x2,zt=da,p=p,q=q,include.mean=include.mean,fixed=fixed)
-LLKvarma(x3,zt=da,p=p,q=q,include.mean=include.mean,fixed=fixed)
+LLKvarma(x4,zt=da,p=p,q=q,include.mean=include.mean,fixed=fixed))/
(4*epsilon[i]*epsilon[j])
}
}
est=fit$par
cat("Final   Estimates: ",est,"\n")
# Step 6: Create and Print Summary Report:
se.coef = sqrt(diag(solve(Hessian)))
tval = fit$par/se.coef
matcoef = cbind(fit$par, se.coef, tval, 2*(1-pnorm(abs(tval))))
dimnames(matcoef) = list(names(tval), c(" Estimate",
" Std. Error", " t value", "Pr(>|t|)"))
cat("\nCoefficient(s):\n")
printCoefmat(matcoef, digits = 4, signif.stars = TRUE)
#
### restore estimates to the format of unconstrained case for printing purpose.
#### icnt: parameter count
#### ist: location count
ist=0
icnt = 0
Ph0=rep(0,k)
sePh0=rep(0,k)
beta=NULL
sebeta=NULL
if(include.mean){
idx=c(1:k)[fixed[1,]==1]
icnt=length(idx)
if(icnt > 0){
Ph0[idx]=est[1:icnt]
sePh0[idx]=se.coef[1:icnt]
}
ist=1
beta=rbind(beta,Ph0)
sebeta=rbind(sebeta,sePh0)
}
PH=NULL
sePH=NULL
if(p > 0){
PH=matrix(0,kp,k)
sePH=matrix(0,kp,k)
for (j in 1:k){
idx=c(1:kp)[fixed[(ist+1):(ist+kp),j]==1]
jdx=length(idx)
if(jdx > 0){
PH[idx,j]=est[(icnt+1):(icnt+jdx)]
sePH[idx,j]=se.coef[(icnt+1):(icnt+jdx)]
icnt=icnt+jdx
}
# end of j-loop
}
#end of if (p > 0)
ist=ist+kp
beta=rbind(beta,PH)
sebeta=rbind(sebeta,sePH)
}
#
TH=NULL
seTH=NULL
if(q > 0){
TH=matrix(0,kq,k)
seTH=matrix(0,kq,k)
for (j in 1:k){
idx=c(1:kq)[fixed[(ist+1):(ist+kq),j]==1]
jdx=length(idx)
if(jdx > 0){
TH[idx,j]=est[(icnt+1):(icnt+jdx)]
seTH[idx,j]=se.coef[(icnt+1):(icnt+jdx)]
icnt=icnt+jdx
}
# end of j-loop
}
# end of if(q > 0).
beta=rbind(beta,TH)
sebeta=rbind(sebeta,seTH)
}
#########
cat("---","\n")
cat("Estimates in matrix form:","\n")
if(include.mean){
cat("Constant term: ","\n")
cat("Estimates: ",Ph0,"\n")
}
if(p > 0){
cat("AR coefficient matrix","\n")
jcnt=0
for (i in 1:p){
cat("AR(",i,")-matrix","\n")
ph=t(PH[(jcnt+1):(jcnt+k),])
print(ph,digits=3)
jcnt=jcnt+k
}
# end of if (p > 0)
}
if(q > 0){
cat("MA coefficient matrix","\n")
icnt=0
for (i in 1:q){
cat("MA(",i,")-matrix","\n")
## the printed MA matrices flip the sign of the internally stored TH
theta=-t(TH[(icnt+1):(icnt+k),])
print(theta,digits=3)
icnt=icnt+k
}
# end of the statement if(q > 0)
}
##### Compute the residuals
## (same recursion as in LLKvarma, evaluated at the final estimates)
zt=da
ist=pqmax+1
#### consider the case t from 1 to pqmatx
at=matrix((zt[1,]-Ph0),1,k)
if(pqmax > 1){
for (t in 2:pqmax){
tmp=matrix((zt[t,]-Ph0),1,k)
if(p > 0){
for (j in 1:p){
if((t-j) > 0){
jdx=(j-1)*k
tmp1=matrix(zt[(t-j),],1,k)%*%as.matrix(PH[(jdx+1):(jdx+k),])
tmp=tmp-tmp1
}
# end of j-loop
}
# end of if(p > 0) statement
}
#
if(q > 0){
for (j in 1:q){
jdx=(j-1)*k
if((t-j)>0){
tmp2=matrix(at[(t-j),],1,k)%*%as.matrix(TH[(jdx+1):(jdx+k),])
tmp=tmp-tmp2
}
#end of j-loop
}
#end of if(q > 0) statement
}
at=rbind(at,tmp)
# end of for(t in 2:pqmax)
}
# end of if(pqmax > 1) statement
}

### for t from ist on
Pcnt = NULL
idim=kp+kq
if(include.mean){
Pcnt=c(1)
idim=idim+1
}
#
for (t in ist:nT){
Past=NULL
if(p > 0){
for (j in 1:p){
Past=c(Past,zt[(t-j),])
}
}
if(q > 0){
for (j in 1:q){
Past=c(Past,at[(t-j),])
}
}
tmp = matrix(c(Pcnt,Past),1,idim)%*%beta
tmp3=zt[t,]-tmp
at=rbind(at,tmp3)
}
#### skip the first max(p,q) residuals.
at=at[(ist:nT),]
sig=t(at)%*%at/(nT-pqmax)
##
cat(" ","\n")
cat("Residuals cov-matrix:","\n")
print(sig)
dd=det(sig)
d1=log(dd)
aic=d1+2*npar/nT
bic=d1+log(nT)*npar/nT
cat("----","\n")
cat("aic= ",aic,"\n")
cat("bic= ",bic,"\n")
## Return Phi/Theta transposed to k x (k*p) / k x (k*q) display form; the
## sign of TH is flipped here, matching the printed MA matrices above.
if(length(PH) > 0)PH=t(PH)
if(length(TH) > 0)TH=-t(TH)

VARMA <- list(data=da,ARorder=p,MAorder=q,cnst=include.mean,coef=beta,secoef=sebeta,residuals=at,Sigma=sig,aic=aic,bic=bic,Phi=PH,Theta=TH,Ph0=Ph0)
}

####
"refVARMA" <- function(model,thres=1.5){
# Refine a fitted VARMA model: zero out all parameters whose absolute
# t-ratio is below `thres` and re-estimate with those entries fixed.
# model: output object from VARMA
# thres: threshold value for |t-ratio|
#
x <- model$data
p1 <- model$ARorder
q1 <- model$MAorder
cnst <- model$cnst
coef <- as.matrix(model$coef)
secoef <- as.matrix(model$secoef)
nr <- dim(coef)[1]
nc <- dim(coef)[2]
## Missing standard errors are replaced by a small positive value so the
## t-ratio computation below never divides by NA.
for (j in 1:nc){
miss <- c(1:nr)[is.na(secoef[,j])]
secoef[miss,j] <- 0.01
}
## Mark (fix = 1) the parameters surviving the |t| >= thres screen.
fix <- matrix(0,nr,nc)
for (j in 1:nc){
tratio <- coef[,j]/secoef[,j]
keep <- c(1:nr)[abs(tratio) >= thres]
fix[keep,j] <- 1
}
### Keep the constant term whenever its t-ratio exceeds 1.
if(cnst){
tratio <- coef[1,]/secoef[1,]
keep <- c(1:nc)[abs(tratio) > 1.0]
if(length(keep) > 0) fix[1,keep] <- 1
}

mm <- VARMA(x,p=p1,q=q1,include.mean=cnst,fixed=fix,beta=coef,sebeta=secoef)

refVARMA <- list(data=x,coef=mm$coef,secoef=mm$secoef,ARorder=p1,MAorder=q1,cnst=cnst,residuals=mm$residuals,Ph0=mm$Ph0,Phi=mm$Phi,Theta=mm$Theta,Sigma=mm$Sigma,aic=mm$aic,bic=mm$bic)
}
######
"VARMApred" <- function(model,h=1,orig=0){
## Compute forecasts and forecast error covariance of a VARMA model.
## created April 21, 2011 by Ruey S. Tsay
#
# model: an output from the VARMA command (uses data, residuals, Sigma,
#        Phi, Theta, Ph0, ARorder, MAorder).
# h:     maximum forecast horizon.
# orig:  forecast origin; values < 1 or beyond the sample default to nT.
#
# Returns list(pred, se.err, orig).
#
x=as.matrix(model$data)
resi=as.matrix(model$residuals)
sig=model$Sigma
Phi=model$Phi
Theta=model$Theta
Ph0=model$Ph0
p=model$ARorder
q=model$MAorder
#
if(p < 0)p=0
if(q < 0)q=0
if(h < 1)h=1
nT=dim(x)[1]
k=dim(x)[2]
T1=dim(resi)[1]
## In case the residuals are shorter due to conditional MLE estimation,
## pad zeros at the beginning so residual time indices line up with x.
if(nT > T1){
r1=matrix(0,(nT-T1),k)
resi=rbind(r1,resi)
}
#
if(length(Ph0) < 1)Ph0=rep(0,k)
if(orig < 1)orig=nT
## Bug fix: the original compared `orig > T` (TRUE == 1), which silently
## reset any origin greater than 1 back to the sample end.
if(orig > nT)orig=nT
## Bug fix: keep the matrix shape when k == 1 (drop=FALSE); rows of px are
## indexed as px[t, ] in the forecast loop below.
px=x[1:orig,,drop=FALSE]
# Compute the psi-weights for the variance of forecast errors.
psi=diag(rep(1,k))
wk=c(psi)
lag=max(1,h)
#
for (i in 1:lag){
if (i <= p){
idx=(i-1)*k
tmp=Phi[,(idx+1):(idx+k)]
}
else{
tmp=matrix(0,k,k)
}
if(i <= q){
mdx=(i-1)*k
tmp=tmp-Theta[,(mdx+1):(mdx+k)]
}
#
jj=i-1
jp=min(jj,p)
if(jp > 0){
for(j in 1:jp){
jdx=(j-1)*k
idx=(i-j)*k
w1=Phi[,(jdx+1):(jdx+k)]
w2=psi[,(idx+1):(idx+k)]
tmp=tmp+w1%*%w2
##print(tmp,digits=4)
}
}
psi=cbind(psi,tmp)
wk=cbind(wk,c(tmp))
##print(psi,digits=3)
}
### Compute the forecasts and their standard errors
sefcst=NULL
for (j in 1:h){
fcst=Ph0
Sig=sig
t=orig+j
### AR part
if(p > 0){
for (ii in 1:p){
idx=(ii-1)*k
ph=Phi[,(idx+1):(idx+k)]
fcst=fcst + matrix(px[(t-ii),],1,k)%*%t(ph)
}
#end of AR part
}
### MA part: only residuals observed at or before the origin contribute.
if(q > 0){
for (jj in 1:q){
idx=(jj-1)*k
if((t-jj) <= orig){
th=Theta[,(idx+1):(idx+k)]
fcst=fcst - matrix(resi[(t-jj),],1,k)%*%t(th)
}
# end of jj-loop
}
# end of MA part
}
px=rbind(px,fcst)
# compute standard errors of forecasts via the psi-weights
if(j > 1){
Sig=sig
for (jj in 2:j){
jdx=(jj-1)*k
wk=psi[,(jdx+1):(jdx+k)]
Sig=Sig + wk%*%sig%*%t(wk)
}
}
sefcst=rbind(sefcst,sqrt(diag(Sig)))
}
cat("Predictions at origin ",orig,"\n")
print(px[(orig+1):(orig+h),],digits=4)
cat("Standard errors of predictions","\n")
if(h == 1){
print(sefcst,digits=4)
}
else {
print(sefcst[1:h,],digits=4)
}
#### if orig < nT, print out actual values.
if(orig < nT){
cat("Observations, predictions, and errors: ","\n")
tmp=NULL
jend=min(nT,orig+h)
for (t in (orig+1):jend){
case=c(t,x[t,],px[t,],x[t,]-px[t,])
tmp=rbind(tmp,case)
}
colnames(tmp) <- c("time",rep("obs",k),rep("fcst",k),rep("err",k))
idx=c(1)
for (j in 1:k){
idx=c(idx,c(0,1,2)*k+j+1)
}
tmp = tmp[,idx]
print(tmp,digits=4)
}

VARMApred <- list(pred=px[(orig+1):(orig+h),],se.err=sefcst,orig=orig)
#end of the program
}

###
"VARecm" <- function(x,p=1,wt,include.const=FALSE){
# Fits an error-correction VAR model.
#
# x:  data matrix (nT by k) in levels; the regression below is built on
#     the first differences of x.
# p:  VAR order of the error-correction representation.
# wt: error-correction term; see NOTE below.
# include.const: include a constant term?
#
# Returns a list with coefficient estimates (coef), their standard errors
# (secoef), residuals, residual covariance (Sigma), and aic/bic.
#
# NOTE(review): `xmtx` is used below (in the cbind for the constant and the
# lagged-difference regressors) before it is ever assigned, and the `wt`
# argument is never referenced in the body. A line constructing the first
# regressor column from `wt` (presumably the lagged error-correction term,
# since beta[1,] is printed as "alpha") appears to be missing just before
# the `include.const` line -- confirm against the published package source.
#
if(!is.matrix(x))x=as.matrix(x)
nT=dim(x)[1]
k=dim(x)[2]
# first differences, with a zero row prepended so indices align with x
dx=x[2:nT,]-x[1:(nT-1),]
dx=rbind(rep(0,k),dx)
# idm: number of regressors per equation
idm=k*(p-1)+1
if(include.const)idm=idm+1
# effective sample size
ist=max(1,p)
ne=nT-ist+1
y=dx[ist:nT,]
if(include.const)xmtx=cbind(xmtx,rep(1,(nT-ist+1)))
if(p > 1){
for (i in 2:p){
ii=i-1
xmtx=cbind(xmtx,dx[(ist-ii):(nT-ii),])
}
}
# multivariate least squares
y=as.matrix(y)
xmtx=as.matrix(xmtx)
xpx = t(xmtx)%*%xmtx
xpxinv=solve(xpx)
xpy=t(xmtx)%*%y
beta=xpxinv%*%xpy
yhat=xmtx%*%beta
resi=y-yhat
sse=(t(resi)%*%resi)/ne
# alpha: coefficients of the first regressor row (printed as "alpha")
alpha=beta[1,]
icnt=1
if(include.const){
# NOTE(review): `c` here shadows the base function c() within this scope
c=beta[2,]
icnt=2
}
dd=diag(xpxinv)
sdbeta=matrix(0,idm,k)
for (i in 1:k){
sdbeta[,i]=sqrt(sse[i,i]*dd)
}
se=sdbeta[1,]
cat("alpha: ","\n")
print(alpha,digits=3)
cat("standard error","\n")
print(se,digits=3)
if(include.const){
cat("constant term:","\n")
print(c,digits=3)
se=sdbeta[2,]
cat("standard error","\n")
print(se,digits=3)
}

cat("AR coefficient matrix","\n")
jst=icnt
# NOTE(review): when p == 1 this loop runs for i in 1:0, i.e. it iterates
# with i = 1 and i = 0 rather than zero times -- likely needs a p > 1 guard.
for (i in 1:(p-1)){
cat("AR(",i,")-matrix","\n")
phi=t(beta[(jst+1):(jst+k),])
se=t(sdbeta[(jst+1):(jst+k),])
print(phi,digits=3)
cat("standard error","\n")
print(se,digits=3)
jst=jst+k
###cat("      ","\n")
}
cat("-----","\n")
cat("Residuals cov-mtx:","\n")
print(sse)
#sse=sse*ne/T
cat("      ","\n")
dd=det(sse)
cat("det(sse) = ",dd,"\n")
d1=log(dd)
aic=d1+(2*idm*k)/nT
bic=d1+log(nT)*idm*k/nT
cat("AIC = ",aic,"\n")
cat("BIC = ",bic,"\n")

VARecm<-list(coef=beta,aic=aic,bic=bic,residuals=resi,secoef=sdbeta,Sigma=sse)
}

##### Model checking
# perform model checking for a multivariate time series model.
# m1 is a VARMA, VMA, VAR type of models.
#
# adj: number of coefficient parameters in the fitted model.(without counting
#       those in the mean and covariance matrix)
# level: switch to print residual CCM matrices.
###
resi=model\$residuals
colnames(resi) <- colnames(model\$data)
ccm(resi,lags=gof,level=level)
cat("Hit Enter to compute MQ-statistics:","\n")
cat("Hit Enter to obtain residual plots:","\n")
MTSplot(resi)
}

####
"tfm" <- function(y,x,b=0,s=1,p=0,q=0){
# Estimate a special transfer function model. Specifically,
# fit an ARMA(p,q) model to [y -(w0+w1*B+w2*B**2+...+ws*B**s)B**b x].
# b: delay
# s: order of the transfer function polynomial.
# note: Length(y) == length(x) & Missing values are not allowed.
#
# Created by R.S. Tsay, March 2009
#
nT=length(y)
T1=length(x)
nT=min(nT,T1)
mx=b+s
ist=mx+1
y1=y[ist:nT]
X=x[(s+1):(nT-b)]
if(s > 0){
for (i in 1:s){
X=cbind(X,x[(s+1-i):(nT-b-i)])
}
nx=ncol(X)
m1=arima(y1,order=c(p,0,q),xreg=X)
}
if(s == 0){
nx = 1
m1 = arima(y1,order=c(p,0,q),xreg=X)
}
se=sqrt(diag(m1\$var.coef))
coef.arma=NULL
se.arma=NULL
pq=p+q
if(pq > 0){
coef.arma=m1\$coef[1:pq]
se.arma=se[1:pq]
p1=cbind(coef.arma,se.arma)
cat("ARMA coefficients & s.e.:","\n")
print(t(p1),digits=3)
}
v=m1\$coef[(pq+1):(pq+1+nx)]
se.v=se[(pq+1):(pq+1+nx)]
pr=cbind(v,se.v)
cat("Transfer function coefficients & s.e.:","\n")
print(t(pr),digits=3)
res=m1\$residuals
beta=matrix(v[2:(nx+1)],nx,1)
nt=y1-v[1]-X%*%beta

tfm <- list(coef=v,se.coef=se.v,coef.arma=coef.arma,se.arma=se.arma,nt=nt,residuals=res)
}

####
"tfm1" <- function(y,x,orderN,orderX){
## Estimation of a transfer function model with ONE exogenous variable
### The model is Y_t -c0 -w(B)/d(B)X_t = theta(B)/phi(B)a_t.
### orderN = c(p,d,q) for the ARMA part
### orderX = c(r,s,b) where d(B) = 1 - d_1B - ... - d_r B^r
###            and w(B) = w_0+w_1B + ... + w_s B^s and b is the delay.
###
### par=c(c0,w0,w1,...,ws,d1,...,dr,phi,theta); Feb. 2012.
###
dify = orderN[2]; dY=y; dX=x
if(dify > 0){
dY <- y[(dify+1):length(y)]-y[1:(length(y)-dify)]
dX <- x[(dify+1):length(x)]-x[1:(length(x)-dify)]
}
N = length(dY); N1=length(dX)
if(N < N1) N1=N; if(N1 < N)N=N1
phi=NULL; theta=NULL; ome=NULL; del=NULL
r=orderX[1]; s=orderX[2]; b=orderX[3]; p=orderN[1]; q=orderN[3]
r=max(r,0); s=max(0,s); b=max(0,b); p=max(0,p); q=max(0,q)
### subroutines used
Nlike <- function(par,dY=dY,dX=dX,orderN=orderN,orderX=orderX){
resi = Gaulike(par,dY=dY,dX=dX,orderN=orderN,orderX=orderX)
sig=sqrt(var(resi))
n1=length(resi)
Nlike=-sum(log(dnorm(resi,mean=rep(0,n1),sd=sig)))
}

Gaulike <- function(par,dY=dY,dX=dX,orderN=orderN,orderX=orderX){
p=orderN[1]; q=orderN[3]; r=orderX[1]; s=orderX[2]; b=orderX[3]
c0=par[1]
ome=par[2:(2+s)]
#
if(r > 0)del=par[(2+s+1):(2+s+r)]
if(p > 0)phi=par[(3+r+s):(r+s+2+p)]
if(q > 0)theta=par[(3+r+s+p):(2+r+s+p+q)]
N=length(dY)
ist=r+1
N1t=dY-c0
Nt = dX
if(r > 0){
Nt=filter(dX,del,method="r",init=rep(mean(dX),r))
}
##
ist=b+s+1
N=length(Nt)
N1t=N1t[ist:N]-ome[1]*Nt[(ist-b):(N-b)]
if(s > 0){
for (j in 1:s){
N1t=N1t-ome[j+1]*Nt[(ist-j-b):(N-j-b)]
}
}
N1=length(N1t)
resi=N1t[(p+1):N1]
if(p > 0){
for (j in 1:p){
resi=resi-phi[j]*N1t[(p+1-j):(N1-j)]
}
}
#
if(q > 0)resi=filter(resi,theta,method="r",init=rep(0,q))
Gaulike = resi
}

### Obtain the N(t) series
Nts <- function(par,dY=dY,dX=dX,orderN=orderN,orderX=orderX){
p=orderN[1]; q=orderN[3]; r=orderX[1]; s=orderX[2]; b=orderX[3]
c0=par[1]
ome=par[2:(2+s)]
#
if(r > 0)del=par[(2+s+1):(2+s+r)]
N=length(dY)
ist=r+1
N1t=dY-c0
Nt=dX
if(r > 0){
Nt=filter(dX,del,method="r",init=rep(mean(dX),r))
}
##
ist=b+s+1
N=length(Nt)
N1t=N1t[ist:N]-ome[1]*Nt[(ist-b):(N-b)]
if(s > 0){
for (j in 1:s){
N1t=N1t-ome[j+1]*Nt[(ist-j-b):(N-j-b)]
}
}
Nts=N1t
}
####
## r = 0, the model can be fitted by the regular "arima" command.
if(r==0){
nobe=N-s-b
Y=dY[(s+1+b):N]
X=dX[(s+1):(N-b)]
if(s > 0){
for (j in 1:s){
X=cbind(X,dX[(s+1-j):(N-b-j)])
}
}
m1=arima(Y,order=c(p,0,q),xreg=X)
est=m1\$coef; sigma2=m1\$sigma2; residuals=m1\$residuals; varcoef=m1\$var.coef
nx=dim(X)[2]
se=sqrt(diag(m1\$var.coef))
coef.arma=NULL
se.arma=NULL
pq=p+q
if(pq > 0){
coef.arma=est[1:pq]
se.arma=se[1:pq]
p1=cbind(coef.arma,se.arma)
cat("ARMA coefficients & s.e.:","\n")
print(t(p1),digits=3)
}
v=est[(pq+1):(pq+1+nx)]
se.v=se[(pq+1):(pq+1+nx)]
pr=cbind(v,se.v)
cat("Transfer function coefficients & s.e.:","\n")
print(t(pr),digits=3)
}
else{
ist=max(r,s)+1+b
par=c(mean(dY[ist:N]))
par=c(par,rep(0.1,s+1))
par=c(par,rep(0.1,r))
if(p > 0)par=c(par,rep(0.1,p))
if(q > 0)par=c(par,rep(0.01,q))
m11=nlm(Nlike,par,hessian=TRUE,dY=dY,dX=dX,orderN=orderN,orderX=orderX)
est=m11\$estimate
varcoef=solve(m11\$hessian)
se=sqrt(diag(varcoef))
residuals=Gaulike(est,dY=dY,dX=dX,orderN=orderN,orderX=orderX)
sigma2=var(residuals)
pq=p+q
npar=length(est)
v=est[1:(npar-pq)]
se.v=se[1:(npar-pq)]
pr=cbind(v,se.v)
cat("Delay: ",b,"\n")
cat("Transfer function coefficients & s.e.:","\n")
cat("in the order: constant, omega, and delta:",c(1,s+1,r),"\n")
print(t(pr),digits=3)
if(pq > 0){
coef.arma=est[(npar-pq+1):npar]
se.arma=se[(npar-pq+1):npar]
p1=cbind(coef.arma,se.arma)
cat("ARMA order:","\n")
print(c(p,dify,q))
cat("ARMA coefficients & s.e.:","\n")
print(t(p1),digits=3)
}
#
}
Nt = Nts(est,dY=dY,dX=dX,orderN=orderN,orderX=orderX)

tfm1 <- list(estimate=est,sigma2=sigma2,residuals=residuals,varcoef=varcoef, Nt=Nt)
}

"tfm2" <- function(y,x,x2=NULL,ct=NULL,wt=NULL,orderN=c(1,0,0),orderS=c(0,0,0),sea=12,order1=c(0,1,0),order2=c(0,-1,0)){
## Estimation of a transfer function model with TWO exogenous variables
### The model is Y_t- c0 -c1*c_t -c2*w_t - w(B)/d(B)X_t  - W(b)/D(B)X_{2t} = theta(B)*Theta(B)/[phi(B)*Phi(B)]a_t.
### orderN = c(p,d,q) for the regular ARMA part
### orderS = c(P,D,Q) for the seasonal ARMA part
### order1 = c(r,s,b) where d(B) = 1 - d_1B - ... - d_r B^r
###            and w(B) = w_0+w_1B + ... + w_s B^s and b is the delay.
###
### order2 = c(r2,s2,b2) for the second exogenous variable
### wt: for co-integrated system
### ct: a given deterministic variable such as a time trend
### par=c(c0,w0,w1,...,ws,d1,...,dr,c1,c2,W0, ...,Ws,D1,...,Dr,phi,theta,Phi,Theta): November 2014
###
dify = orderN[2]; dY=y; dX=x; dX2=x2; dW=wt; dC=ct
phi=NULL; theta=NULL; Phi=NULL; Theta=NULL; omega=NULL; delta=NULL; Omega=NULL; Delta=NULL
if(dify > 0){
dY <- y[(dify+1):length(y)]-y[1:(length(y)-dify)]
dX <- x[(dify+1):length(x)]-x[1:(length(x)-dify)]
if(!is.null(x2)){
dX2 <- x2[(dify+1):length(x2)]-x2[1:(length(x2)-dify)]
}
if(!is.null(wt)){
dW <- wt[(dify+1):length(wt)] - wt[1:(length(wt)-dify)]
}
if(!is.null(ct)){
dC <- ct[(dify+1):length(ct)] - ct[1:(length(ct)-dify)]
}
}
### seasonal difference, if any
difys = orderS[2]; lags=difys*sea
if(difys > 0){
dY <- dY[(lags+1):length(dY)]-dY[1:(length(dY)-lags)]
dX <- dX[(lags+1):length(dX)]-dX[1:(length(dX)-lags)]
if(!is.null(x2)){
dX2 <- dX2[(lags+1):length(dX2)]-dX2[1:(length(dX2)-lags)]
}
if(!is.null(wt)){
dW <- dW[(lags+1):length(dW)] - dW[1:(length(dW)-lags)]
}
if(!is.null(ct)){
dC <- dC[(lags+1):length(dC)] - dC[1:(length(dC)-lags)]
}
}
#
N = length(dY); N1=length(dX)
N=min(N,N1)
if(length(dX2) > 0)N=min(N,length(dX2))
if(length(dW) > 0) N=min(N,length(dW))
if(length(dC) > 0) N=min(N,length(dC))
phi=NULL; theta=NULL; ome=NULL; del=NULL; ome2=NULL; del2=NULL; Phi=NULL; Theta=NULL
r=order1[1]; s=order1[2]; b=order1[3]; p=orderN[1]; q=orderN[3]; P=orderS[1]; Q=orderS[3]
r=max(r,0); s=max(0,s); b=max(0,b); p=max(0,p); q=max(0,q); P=max(0,P); Q=max(0,Q)
r2=order2[1]; s2=order2[2]; b2=order2[3]
### subroutines used
Nlike <- function(par,dY=dY,dX=dX,dX2=dX2,dW=dW,dC=dC,orderN=orderN,orderS=orderS,sea=sea,order1=order1,order2=order2){
resi = Gaulike(par,dY=dY,dX=dX,dX2=dX2,dW=dW,dC=dC,orderN=orderN,orderS=orderS,sea=sea,order1=order1,order2=order2)
sig=sqrt(var(resi))
n1=length(resi)
Nlike=-sum(dnorm(resi,mean=rep(0,n1),sd=sig,log=TRUE))
}

Gaulike <- function(par,dY=dY,dX=dX,dX2=dX2,dW=dW,dC=dC,orderN=orderN,orderS=orderS,sea=sea,order1=order1,order2=order2){
p=orderN[1]; q=orderN[3]; r=order1[1]; s=order1[2]; b=order1[3]; P=orderS[1]; Q=orderS[3]
r2=order2[1]; s2=order2[2]; b2=order2[3]
c0=par[1]
ome=par[2:(2+s)]
#
if(r > 0)del=par[(2+s+1):(2+s+r)]
icnt=2+s+r
if(!is.null(dC)){c1=par[icnt+1]
icnt=icnt+1
}
if(!is.null(dW)){c2=par[icnt+1]
icnt=icnt+1
}
if(!is.null(dX2)){
ome2=par[(icnt+1):(icnt+1+s2)]
icnt=icnt+1+s2
if(r2 > 0){del2=par[(icnt+1):(icnt+r2)]
icnt=icnt+r2
}
}
if(p > 0){phi=par[(icnt+1):(icnt+p)]
icnt=icnt+p
}
if(q > 0){theta=par[(icnt+1):(icnt+q)]
icnt=icnt+q
}
if(P > 0){Phi=par[(icnt+1):(icnt+P)]
icnt=icnt+P
}
if(Q > 0)Theta=par[(icnt+1):(icnt+Q)]
#
N=length(dY)
N1t=dY-c0
Nt = dX
if(r > 0){
Nt=filter(dX,del,method="r",init=rep(mean(dX),r))
}
##
ist=max(b+s+1,b2+s2+1)
N=length(Nt)
N1t=N1t[ist:N]-ome[1]*Nt[(ist-b):(N-b)]
if(s > 0){
for (j in 1:s){
N1t=N1t-ome[j+1]*Nt[(ist-j-b):(N-j-b)]
}
}
if(!is.null(dC))N1t=N1t-c1*dC[ist:N]
if(!is.null(dW))N1t=N1t-c2*dW[ist:N]
if(!is.null(dX2)){
Zt=dX2
if(r2 > 0){
Zt=filter(dX2,del2,method="r",init=rep(mean(dX2),r2))
}
N1t=N1t - ome2[1]*Zt[(ist-b2):(N-b2)]
if(s2 > 0){
for (j in 1:s2){
N1t=N1t-ome2[j+1]*Zt[(ist-j-b2):(N-j-b2)]
}
}
}
N1=length(N1t)
re=N1t[(p+1):N1]
if(p > 0){
for (j in 1:p){
re=re-phi[j]*N1t[(p+1-j):(N1-j)]
}
}
#
if(q > 0)re=filter(re,theta,method="r",init=rep(0,q))
N1=length(re)
resi=re[(P*sea+1):N1]
if(P > 0){
for (j in 1:P){
resi=resi-Phi[j]*re[(P*sea+1-j*sea):(N1-j*sea)]
}
}
if(Q > 0){
f1=rep(0,sea*Q)
for (j in 1:Q){
f1[j*sea]=Theta[j]
}
resi=filter(resi,f1,method="r",init=rep(0,sea*Q))
}
Gaulike = resi
}

### Obtain the N(t) series
Nts <- function(par,dY=dY,dX=dX,dX2=dX2,dW=dW,dC=dC,order1=order1,order2=order2){
r=order1[1]; s=order1[2]; b=order1[3]
r2=order2[1]; s2=order2[2]; b2=order2[3]
c0=par[1]
ome=par[2:(2+s)]
icnt=2+s
#
if(r > 0){del=par[(2+s+1):(2+s+r)]
icnt=2+s+r
}
if(!is.null(dC)){c1=par[icnt+1]
icnt=icnt+1
}
if(!is.null(dW)){c2=par[icnt+1]
icnt=icnt+1
}
if(!is.null(dX2)){
ome2=par[(icnt+1):(icnt+1+s2)]
icnt=icnt+1+s2
if(r2 > 0){
del2=par[(icnt+1):(icnt+r2)]
icnt=icnt+r2
}
}
N=length(dY)
N1t=dY-c0
Nt=dX
if(r > 0){
Nt=filter(dX,del,method="r",init=rep(mean(dX),r))
}
##
ist=max(b+s+1,b2+s2+1)
N=length(Nt)
N1t=N1t[ist:N]-ome[1]*Nt[(ist-b):(N-b)]
if(s > 0){
for (j in 1:s){
N1t=N1t-ome[j+1]*Nt[(ist-j-b):(N-j-b)]
}
}
if(!is.null(dC))N1t=N1t-c1*dC[ist:N]
if(!is.null(dW)){N1t=N1t-c2*dW[ist:N]}
if(!is.null(dX2)){
Zt=dX2
if(r2 > 0){
Zt=filter(Zt,del2,method="r",init=rep(mean(dX2),r2))
}
N1t=N1t - ome2[1]*Zt[(ist-b2):(N-b2)]
if(s2 > 0){
for (j in 1:s2){
N1t=N1t-ome2[j+1]*Zt[(ist-b2-j):(N-b2-j)]
}
}
}
Nts=N1t
}
####
## r = 0 && r2=0, the model can be fitted by the regular "arima" command.
if((r==0) && (r2==0)){
ist=max(s+b,s2+b2)+1
nobe=N-ist+1
Y=dY[ist:N]
X=dX[(ist-b):(N-b)]
if(s > 0){
for (j in 1:s){
X=cbind(X,dX[(ist-b-j):(N-b-j)])
}
}
if(!is.null(dC)){X=cbind(X,dC[ist:N])}
if(!is.null(dW)){X=cbind(X,dW[ist:N])}
if(!is.null(dX2)){
X=cbind(X,dX2[(ist-b2):(N-b2)])
```