Demonstration examples can be run by executing the code below.
# Model with class "dthmm" with the Beta distribution
demo("beta", package="HiddenMarkov")
# Model with class "dthmm" with the Gamma distribution
demo("gamma", package="HiddenMarkov")
# Model with class "dthmm" with the Log Normal distribution
demo("lnorm", package="HiddenMarkov")
# Model with class "dthmm" with the Logistic distribution
demo("logis", package="HiddenMarkov")
# Model with class "dthmm" with the Gaussian distribution
demo("norm", package="HiddenMarkov")
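These demo scripts ship inside the package's demo directory. As an aside (not part of the transcripts below), they can be listed and located with standard R calls:

demo(package="HiddenMarkov")                            # list the demos shipped with the package
system.file("demo", "beta.R", package="HiddenMarkov")   # path to a demo script, to read or adapt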
demo(beta)
---- ~~~~
> if (interactive()) par.default <- par(ask=TRUE)
> #----- Test Beta Distribution -----
> # "shape1" Markov dependent
> # "shape2" time dependent
>
> Pi <- matrix(c(0.8, 0.2,
+ 0.3, 0.7),
+ byrow=TRUE, nrow=2)
> x <- seq(0.01, 0.99, 0.01)
> plot(x, dbeta(x, shape1=0.5, shape2=2), type="l",
+ col="blue", ylab="Density")
> points(x, dbeta(x, shape1=8, shape2=2), type="l", col="red")
> n <- 1000
> x <- dthmm(NULL, Pi, c(0,1), "beta", pm=list(shape1=c(0.5, 6)),
+ pn=list(shape2=rep(2, n)))
> x <- simulate(x, nsim=n)
> # use above parameter values as initial values
> y <- BaumWelch(x)
iter = 1
LL = 302.9293424
diff = Inf
iter = 2
LL = 303.6582511
diff = 0.7289086
iter = 3
LL = 303.7453058
diff = 0.08705475
iter = 4
LL = 303.7656084
diff = 0.02030258
iter = 5
LL = 303.7705113
diff = 0.004902897
iter = 6
LL = 303.7717085
diff = 0.001197159
iter = 7
LL = 303.7720043
diff = 0.0002958222
iter = 8
LL = 303.7720783
diff = 7.402648e-05
iter = 9
LL = 303.7720971
diff = 1.87594e-05
iter = 10
LL = 303.7721019
diff = 4.811862e-06
> # check parameter estimates
> print(summary(y))
$delta
[1] 0 1
$Pi
[,1] [,2]
[1,] 0.7820271 0.2179729
[2,] 0.3234994 0.6765006
$nonstat
[1] TRUE
$distn
[1] "beta"
$pm
$pm$shape1
[1] 0.5092974 5.8977168
$discrete
[1] FALSE
$n
[1] 1000
> print(sum(y$delta))
[1] 1
> print(y$Pi %*% rep(1, ncol(y$Pi)))
[,1]
[1,] 1
[2,] 1
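The iteration trace above (iter, LL, diff) is printed by default. As a hedged aside, not part of the demo itself, the trace can be silenced and the stopping tolerance changed through bwcontrol:

y <- BaumWelch(x, control=bwcontrol(prt=FALSE, tol=1e-8))   # quiet run with a stricter tolerance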
> #----- Test Beta Distribution -----
> # "shape2" Markov dependent
> # "shape1" time dependent
>
> Pi <- matrix(c(0.8, 0.2,
+ 0.3, 0.7),
+ byrow=TRUE, nrow=2)
> x <- seq(0.01, 0.99, 0.01)
> plot(x, dbeta(x, shape1=2, shape2=6), type="l",
+ col="blue", ylab="Density")
> points(x, dbeta(x, shape1=2, shape2=0.5), type="l", col="red")
> n <- 1000
> x <- dthmm(NULL, Pi, c(0,1), "beta", pm=list(shape2=c(0.5, 6)),
+ pn=list(shape1=rep(2, n)))
> x <- simulate(x, nsim=n)
> # use above parameter values as initial values
> y <- BaumWelch(x)
iter = 1
LL = 320.1889033
diff = Inf
iter = 2
LL = 323.6588268
diff = 3.469924
iter = 3
LL = 324.0219386
diff = 0.3631118
iter = 4
LL = 324.0859432
diff = 0.06400464
iter = 5
LL = 324.0972961
diff = 0.01135287
iter = 6
LL = 324.0993106
diff = 0.002014514
iter = 7
LL = 324.0996686
diff = 0.0003580029
iter = 8
LL = 324.0997324
diff = 6.378066e-05
iter = 9
LL = 324.0997438
diff = 1.140092e-05
iter = 10
LL = 324.0997458
diff = 2.046412e-06
> # check parameter estimates
> print(summary(y))
$delta
[1] 0 1
$Pi
[,1] [,2]
[1,] 0.7915871 0.2084129
[2,] 0.2395876 0.7604124
$nonstat
[1] TRUE
$distn
[1] "beta"
$pm
$pm$shape2
[1] 0.4888569 5.6812557
$discrete
[1] FALSE
$n
[1] 1000
> print(sum(y$delta))
[1] 1
> print(y$Pi %*% rep(1, ncol(y$Pi)))
[,1]
[1,] 1
[2,] 1
> if (interactive()) par(par.default)
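The demo stops after checking the parameter estimates. As an illustrative follow-up (not part of the demo), the fitted model can also be used to decode the hidden state sequence and compare it with the simulated chain stored by simulate() in x$y:

states <- Viterbi(y)         # most likely state sequence under the fitted model
print(table(states, x$y))    # decoded states versus the simulated chain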
demo(gamma)
---- ~~~~~
> #----- Test Gamma Distribution -----
> # estimate rate only
>
> Pi <- matrix(c(0.8, 0.2,
+ 0.3, 0.7),
+ byrow=TRUE, nrow=2)
> n <- 1000
> x <- dthmm(NULL, Pi, c(0,1), "gamma", pm=list(rate=c(4, 0.5)),
+ pn=list(shape=c(rep(3, n/2), rep(5, n/2))))
> x <- simulate(x, nsim=n)
> # use above parameter values as initial values
> y <- BaumWelch(x)
iter = 1
LL = -1967.0930904
diff = Inf
iter = 2
LL = -1966.8103155
diff = 0.2827749
iter = 3
LL = -1966.8070160
diff = 0.003299506
iter = 4
LL = -1966.8069111
diff = 0.0001049679
iter = 5
LL = -1966.8069075
diff = 3.594145e-06
> # check parameter estimates
> print(summary(y))
$delta
[1] 0 1
$Pi
[,1] [,2]
[1,] 0.7985225 0.2014775
[2,] 0.2845269 0.7154731
$nonstat
[1] TRUE
$distn
[1] "gamma"
$pm
$pm$rate
[1] 3.9712391 0.5014176
$discrete
[1] FALSE
$n
[1] 1000
> print(sum(y$delta))
[1] 1
> print(y$Pi %*% rep(1, ncol(y$Pi)))
[,1]
[1,] 1
[2,] 1
> #----- Test Gamma Distribution -----
> # estimate shape only
>
> Pi <- matrix(c(0.8, 0.2,
+ 0.3, 0.7),
+ byrow=TRUE, nrow=2)
> n <- 1000
> x <- dthmm(NULL, Pi, c(0,1), "gamma", pm=list(shape=c(4, 0.1)),
+ pn=list(rate=c(rep(0.5, n/2), rep(1, n/2))))
> x <- simulate(x, nsim=n)
> # use above parameter values as initial values
> y <- BaumWelch(x)
iter = 1
LL = 330.7211036
diff = Inf
iter = 2
LL = 332.0578426
diff = 1.336739
iter = 3
LL = 332.0617010
diff = 0.003858409
iter = 4
LL = 332.0617822
diff = 8.122223e-05
iter = 5
LL = 332.0617840
diff = 1.801674e-06
> # check parameter estimates
> print(summary(y))
$delta
[1] 0 1
$Pi
[,1] [,2]
[1,] 0.8111125 0.1888875
[2,] 0.3287808 0.6712192
$nonstat
[1] TRUE
$distn
[1] "gamma"
$pm
$pm$shape
[1] 3.957469 0.103773
$discrete
[1] FALSE
$n
[1] 1000
> print(sum(y$delta))
[1] 1
> print(y$Pi %*% rep(1, ncol(y$Pi)))
[,1]
[1,] 1
[2,] 1
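Each summary reports $nonstat as TRUE, meaning the chain is treated as non-stationary and delta is estimated freely rather than tied to Pi. As a hedged aside, the stationary distribution implied by the fitted transition matrix can be inspected with compdelta, the same helper the lnorm demo below uses for its initial values:

print(compdelta(y$Pi))   # stationary distribution of the fitted Pi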
demo(lnorm)
---- ~~~~~
> #----- Log Normal Distribution -----
>
> n <- 1000
> m <- 2
> pm <- list(meanlog=c(3, 4), sdlog=c(0.5, 0.4))
> Pi <- matrix(c(0.8, 0.2,
+ 0.3, 0.7),
+ byrow=TRUE, nrow=2)
> n <- 1000
> x <- dthmm(NULL, Pi, c(1,0), "lnorm", pm=pm)
> x <- simulate(x, nsim=n, seed=5)
> # Change initial values for delta
> x$delta <- compdelta(Pi)
> # use above parameter values as initial values
> y <- BaumWelch(x)
iter = 1
LL = -4387.7814702
diff = Inf
iter = 2
LL = -4385.4479247
diff = 2.333546
iter = 3
LL = -4385.4018378
diff = 0.04608688
iter = 4
LL = -4385.3901992
diff = 0.01163859
iter = 5
LL = -4385.3841688
diff = 0.006030368
iter = 6
LL = -4385.3798822
diff = 0.004286657
iter = 7
LL = -4385.3764843
diff = 0.00339784
iter = 8
LL = -4385.3737042
diff = 0.002780098
iter = 9
LL = -4385.3714065
diff = 0.002297769
iter = 10
LL = -4385.3694993
diff = 0.001907144
iter = 11
LL = -4385.3679127
diff = 0.001586633
iter = 12
LL = -4385.3665907
diff = 0.001322013
iter = 13
LL = -4385.3654880
diff = 0.001102709
iter = 14
LL = -4385.3645675
diff = 0.0009204855
iter = 15
LL = -4385.3637987
diff = 0.0007687887
iter = 16
LL = -4385.3631564
diff = 0.0006423333
iter = 17
LL = -4385.3626195
diff = 0.0005368164
iter = 18
LL = -4385.3621708
diff = 0.0004487098
iter = 19
LL = -4385.3617957
diff = 0.0003751047
iter = 20
LL = -4385.3614821
diff = 0.0003135938
iter = 21
LL = -4385.3612200
diff = 0.0002621781
iter = 22
LL = -4385.3610008
diff = 0.0002191946
iter = 23
LL = -4385.3608175
diff = 0.0001832573
iter = 24
LL = -4385.3606643
diff = 0.0001532097
iter = 25
LL = -4385.3605362
diff = 0.000128086
iter = 26
LL = -4385.3604291
diff = 0.0001070795
iter = 27
LL = -4385.3603396
diff = 8.951565e-05
iter = 28
LL = -4385.3602648
diff = 7.483063e-05
iter = 29
LL = -4385.3602022
diff = 6.255297e-05
iter = 30
LL = -4385.3601499
diff = 5.228827e-05
iter = 31
LL = -4385.3601062
diff = 4.370687e-05
iter = 32
LL = -4385.3600697
diff = 3.653294e-05
iter = 33
LL = -4385.3600392
diff = 3.053576e-05
iter = 34
LL = -4385.3600136
diff = 2.55226e-05
iter = 35
LL = -4385.3599923
diff = 2.133196e-05
iter = 36
LL = -4385.3599745
diff = 1.782907e-05
iter = 37
LL = -4385.3599596
diff = 1.490119e-05
iter = 38
LL = -4385.3599471
diff = 1.245389e-05
iter = 39
LL = -4385.3599367
diff = 1.040835e-05
iter = 40
LL = -4385.3599280
diff = 8.698717e-06
> # check parameter estimates
> print(summary(y))
$delta
[1] 1.000000e+00 4.170924e-84
$Pi
[,1] [,2]
[1,] 0.7923932 0.2076068
[2,] 0.2978687 0.7021313
$nonstat
[1] TRUE
$distn
[1] "lnorm"
$pm
$pm$meanlog
[1] 3.043749 4.008544
$pm$sdlog
[1] 0.4975454 0.4076449
$discrete
[1] FALSE
$n
[1] 1000
> print(sum(y$delta))
[1] 1
> print(y$Pi %*% rep(1, ncol(y$Pi)))
[,1]
[1,] 1
[2,] 1
> if (interactive()) par.default <- par(ask=TRUE)
> plot(1:n, x$x, type="l", xlab="Time", ylab="X",
+ main=paste("Log Normal ", m, "State HMM"))
> for (j in 1:m) points((1:n)[x$y==j], x$x[x$y==j], col=j+1)
> w <- seq(0.1, 150, 0.1)
> plot(w, dlnorm(w, meanlog=pm$meanlog[1], sdlog=pm$sdlog[1]),
+ type="l", col=2, ylab="Density",
+ main="Log Normal Densities")
> points(w, dlnorm(w, meanlog=pm$meanlog[2], sdlog=pm$sdlog[2]),
+ type="l", col=3)
> error <- residuals(y)
> w <- hist(error, main="Log Normal HMM: Pseudo Residuals", xlab="")
> z <- seq(-3, 3, 0.01)
> points(z, dnorm(z)*n*(w$breaks[2]-w$breaks[1]), col="red", type="l")
> box()
> qqnorm(error, main="Log Normal HMM: Q-Q Plot of Pseudo Residuals")
> abline(a=0, b=1, lty=3)
> abline(h=seq(-2, 2, 1), lty=3)
> abline(v=seq(-2, 2, 1), lty=3)
> if (interactive()) par(par.default)
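The histogram and Q-Q plot give an informal check that the pseudo residuals behave like standard normal draws. A more formal supplement, offered here only as a sketch and not part of the demo, is a one-sample Kolmogorov-Smirnov test against N(0, 1):

print(ks.test(residuals(y), "pnorm"))   # illustrative test of the pseudo residuals against N(0,1)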
demo(logis)
---- ~~~~~
> if (interactive()) par.default <- par(ask=TRUE)
> #----- Test Logistic Distribution -----
>
> Pi <- matrix(c(0.8, 0.2,
+ 0.3, 0.7),
+ byrow=TRUE, nrow=2)
> pm <- list(location=c(8, -2), scale=c(1, 0.5))
> x <- dthmm(NULL, Pi, c(0,1), "logis", pm=pm)
> x <- simulate(x, nsim=1000)
> hist(x$x, main="", xlab=expression(x))
> box()
> # use above parameter values as initial values
> y <- BaumWelch(x)
iter = 1
LL = -2258.5233596
diff = Inf
iter = 2
LL = -2255.6168581
diff = 2.906502
iter = 3
LL = -2255.6163752
diff = 0.0004828851
iter = 4
LL = -2255.6163736
diff = 1.580797e-06
> # check parameter estimates
> print(summary(y))
$delta
[1] 0 1
$Pi
[,1] [,2]
[1,] 0.7620487 0.2379513
[2,] 0.2897542 0.7102458
$nonstat
[1] TRUE
$distn
[1] "logis"
$pm
$pm$location
[1] 7.934792 -1.989136
$pm$scale
[1] 0.9973621 0.4965855
$discrete
[1] FALSE
$n
[1] 1000
> print(sum(y$delta))
[1] 1
> print(y$Pi %*% rep(1, ncol(y$Pi)))
[,1]
[1,] 1
[2,] 1
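The EM algorithm is only guaranteed to find a local maximum, and the demo restarts it from the true parameter values. As an illustrative robustness check (the perturbed starting values below are assumptions, not taken from the demo), the same data can be refitted from different initial values:

x2 <- dthmm(x$x, Pi, c(0.5, 0.5), "logis", pm=list(location=c(6, -1), scale=c(1.5, 1)))
y2 <- BaumWelch(x2, control=bwcontrol(prt=FALSE))
print(summary(y2)$pm)   # should land close to the estimates above if the maximum is global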
> #---------------------------------------------
> # Fixed Scale Parameter
>
> Pi <- matrix(c(0.8, 0.2,
+ 0.3, 0.7),
+ byrow=TRUE, nrow=2)
> n <- 1000
> pm <- list(location=c(8, -2))
> pn <- list(scale=rep(1, n))
> x <- dthmm(NULL, Pi, c(0,1), "logis", pm=pm, pn=pn)
> x <- simulate(x, nsim=n)
> hist(x$x, main="", xlab=expression(x))
> box()
> # use above parameter values as initial values
> y <- BaumWelch(x)
iter = 1
LL = -2514.5479928
diff = Inf
iter = 2
LL = -2513.8439163
diff = 0.7040765
iter = 3
LL = -2513.8430912
diff = 0.0008250549
iter = 4
LL = -2513.8430888
diff = 2.435802e-06
> # check parameter estimates
> print(summary(y))
$delta
[1] 0 1
$Pi
[,1] [,2]
[1,] 0.7961002 0.2038998
[2,] 0.3240726 0.6759274
$nonstat
[1] TRUE
$distn
[1] "logis"
$pm
$pm$location
[1] 7.959897 -1.993391
$discrete
[1] FALSE
$n
[1] 1000
> print(sum(y$delta))
[1] 1
> print(y$Pi %*% rep(1, ncol(y$Pi)))
[,1]
[1,] 1
[2,] 1
> z <- residuals(y)
> qqnorm(z, main="Logistic HMM: Q-Q Plot of Pseudo Residuals")
> abline(a=0, b=1, lty=3)
> abline(h=seq(-2, 2, 1), lty=3)
> abline(v=seq(-2, 2, 1), lty=3)
> #---------------------------------------------
> # Fixed Location Parameter
>
> Pi <- matrix(c(0.8, 0.2,
+ 0.3, 0.7),
+ byrow=TRUE, nrow=2)
> n <- 1000
> pm <- list(scale=c(0.5, 2))
> pn <- list(location=c(rep(5, n/2), rep(1, n/2)))
> x <- dthmm(NULL, Pi, c(0,1), "logis", pm=pm, pn=pn)
> x <- simulate(x, nsim=n)
> hist(x$x, main="", xlab=expression(x))
> box()
> # use above parameter values as initial values
> y <- BaumWelch(x)
iter = 1
LL = -2130.3544084
diff = Inf
iter = 2
LL = -2128.9787313
diff = 1.375677
iter = 3
LL = -2128.7033294
diff = 0.2754019
iter = 4
LL = -2128.5433337
diff = 0.1599957
iter = 5
LL = -2128.4364584
diff = 0.1068753
iter = 6
LL = -2128.3640945
diff = 0.07236391
iter = 7
LL = -2128.3152911
diff = 0.04880338
iter = 8
LL = -2128.2825444
diff = 0.03274668
iter = 9
LL = -2128.2606657
diff = 0.02187873
iter = 10
LL = -2128.2460984
diff = 0.01456734
iter = 11
LL = -2128.2364258
diff = 0.00967255
iter = 12
LL = -2128.2300175
diff = 0.00640829
iter = 13
LL = -2128.2257794
diff = 0.004238121
iter = 14
LL = -2128.2229805
diff = 0.00279889
iter = 15
LL = -2128.2211342
diff = 0.001846292
iter = 16
LL = -2128.2199174
diff = 0.001216786
iter = 17
LL = -2128.2191161
diff = 0.0008013189
iter = 18
LL = -2128.2185887
diff = 0.0005273955
iter = 19
LL = -2128.2182418
diff = 0.0003469427
iter = 20
LL = -2128.2180136
diff = 0.0002281446
iter = 21
LL = -2128.2178636
diff = 0.0001499776
iter = 22
LL = -2128.2177651
diff = 9.85673e-05
iter = 23
LL = -2128.2177003
diff = 6.476659e-05
iter = 24
LL = -2128.2176578
diff = 4.254984e-05
iter = 25
LL = -2128.2176298
diff = 2.795038e-05
iter = 26
LL = -2128.2176115
diff = 1.835826e-05
iter = 27
LL = -2128.2175994
diff = 1.205698e-05
iter = 28
LL = -2128.2175915
diff = 7.917987e-06
> # check parameter estimates
> print(summary(y))
$delta
[1] 0 1
$Pi
[,1] [,2]
[1,] 0.7978198 0.2021802
[2,] 0.2263958 0.7736042
$nonstat
[1] TRUE
$distn
[1] "logis"
$pm
$pm$scale
[1] 0.4956886 1.9078789
$discrete
[1] FALSE
$n
[1] 1000
> print(sum(y$delta))
[1] 1
> print(y$Pi %*% rep(1, ncol(y$Pi)))
[,1]
[1,] 1
[2,] 1
> z <- residuals(y)
> qqnorm(z, main="Logistic HMM: Q-Q Plot of Pseudo Residuals")
> abline(a=0, b=1, lty=3)
> abline(h=seq(-2, 2, 1), lty=3)
> abline(v=seq(-2, 2, 1), lty=3)
> if (interactive()) par(par.default)
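BaumWelch prints the log-likelihood at every iteration; after convergence it can also be extracted with logLik, which is useful when comparing competing fits. The free-parameter count below is a hedged illustration only (it ignores delta):

LL <- logLik(y)                                            # log-likelihood of the fitted model
k  <- length(unlist(y$pm)) + length(y$Pi) - nrow(y$Pi)     # parameters in pm plus free entries of Pi
print(-2*as.numeric(LL) + 2*k)                             # illustrative AIC for model comparison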
demo(norm)
---- ~~~~
> if (interactive()) par.default <- par(ask=TRUE)
> #----- Test Gaussian Distribution -----
> # fixed variance
>
> Pi <- matrix(c(1/2, 1/2, 0,
+ 1/3, 1/3, 1/3,
+ 0, 1/2, 1/2),
+ byrow=TRUE, nrow=3)
> p1 <- c(1, 6, 3)
> p2 <- c(0.5, 1, 0.5)
> n <- 1000
> pm <- list(mean=p1)
> pn <- list(sd=rep(0.5, n))
> n <- 1000
> x <- dthmm(NULL, Pi, c(0,1,0), "norm", pm=pm, pn=pn)
> x <- simulate(x, nsim=n)
> # use above parameter values as initial values
> y <- BaumWelch(x)
iter = 1
LL = -1560.7194355
diff = Inf
iter = 2
LL = -1558.5280107
diff = 2.191425
iter = 3
LL = -1558.5279916
diff = 1.911254e-05
iter = 4
LL = -1558.5279916
diff = 2.909474e-08
> # check parameter estimates
> print(summary(y))
$delta
[1] 0 1 0
$Pi
[,1] [,2] [,3]
[1,] 0.4774057 0.5225943 0.0000000
[2,] 0.3269865 0.3355469 0.3374666
[3,] 0.0000000 0.4503480 0.5496520
$nonstat
[1] TRUE
$distn
[1] "norm"
$pm
$pm$mean
[1] 0.990279 5.992571 2.980833
$discrete
[1] FALSE
$n
[1] 1000
> print(sum(y$delta))
[1] 1
> print(y$Pi %*% rep(1, ncol(y$Pi)))
[,1]
[1,] 1
[2,] 1
[3,] 1
> #----- Test Gaussian Distribution -----
> # residual process, zero mean, Markov driven variance
>
> Pi <- matrix(c(0.9, 0.1,
+ 0.9, 0.1),
+ byrow=TRUE, nrow=2)
> n <- 1000
> pn <- list(mean=rep(0, n))
> pm <- list(sd=c(1, 3))
> n <- 1000
> x <- dthmm(NULL, Pi, c(0,1), "norm", pm=pm, pn=pn)
> x <- simulate(x, nsim=n)
> # check transition probabilities
> m <- nrow(Pi)
> estPi <- table(x$y[-n], x$y[-1])
> rowtotal <- estPi %*% matrix(1, nrow=m, ncol=1)
> estPi <- diag(as.vector(1/rowtotal)) %*% estPi
> print(estPi)
1 2
[1,] 0.8922559 0.1077441
[2,] 0.8981481 0.1018519
> # use above parameter values as initial values
> y <- BaumWelch(x)
iter = 1
LL = -1638.8647327
diff = Inf
iter = 2
LL = -1637.6849221
diff = 1.179811
iter = 3
LL = -1637.3363512
diff = 0.3485709
iter = 4
LL = -1637.0939698
diff = 0.2423814
iter = 5
LL = -1636.8970028
diff = 0.196967
iter = 6
LL = -1636.7351837
diff = 0.1618192
iter = 7
LL = -1636.6027181
diff = 0.1324655
iter = 8
LL = -1636.4947383
diff = 0.1079798
iter = 9
LL = -1636.4070356
diff = 0.08770272
iter = 10
LL = -1636.3360148
diff = 0.0710208
iter = 11
LL = -1636.2786439
diff = 0.05737091
iter = 12
LL = -1636.2323925
diff = 0.0462514
iter = 13
LL = -1636.1951667
diff = 0.03722586
iter = 14
LL = -1636.1652452
diff = 0.0299215
iter = 15
LL = -1636.1412209
diff = 0.02402424
iter = 16
LL = -1636.1219486
diff = 0.01927231
iter = 17
LL = -1636.1064992
diff = 0.01544936
iter = 18
LL = -1636.0941215
diff = 0.01237778
iter = 19
LL = -1636.0842090
diff = 0.009912487
iter = 20
LL = -1636.0762735
diff = 0.00793552
iter = 21
LL = -1636.0699222
diff = 0.006351257
iter = 22
LL = -1636.0648398
diff = 0.005082408
iter = 23
LL = -1636.0607732
diff = 0.004066636
iter = 24
LL = -1636.0575194
diff = 0.003253754
iter = 25
LL = -1636.0549160
diff = 0.002603417
iter = 26
LL = -1636.0528328
diff = 0.002083228
iter = 27
LL = -1636.0511656
diff = 0.001667202
iter = 28
LL = -1636.0498311
diff = 0.00133451
iter = 29
LL = -1636.0487626
diff = 0.001068469
iter = 30
LL = -1636.0479069
diff = 0.0008557234
iter = 31
LL = -1636.0472213
diff = 0.0006855859
iter = 32
LL = -1636.0466718
diff = 0.0005495089
iter = 33
LL = -1636.0462311
diff = 0.000440657
iter = 34
LL = -1636.0458775
diff = 0.0003535657
iter = 35
LL = -1636.0455937
diff = 0.0002838676
iter = 36
LL = -1636.0453656
diff = 0.0002280723
iter = 37
LL = -1636.0451822
diff = 0.0001833907
iter = 38
LL = -1636.0450346
diff = 0.0001475944
iter = 39
LL = -1636.0449157
diff = 0.0001189029
iter = 40
LL = -1636.0448198
diff = 9.589376e-05
iter = 41
LL = -1636.0447424
diff = 7.743026e-05
iter = 42
LL = -1636.0446798
diff = 6.260427e-05
iter = 43
LL = -1636.0446291
diff = 5.069004e-05
iter = 44
LL = -1636.0445880
diff = 4.110753e-05
iter = 45
LL = -1636.0445546
diff = 3.339313e-05
iter = 46
LL = -1636.0445274
diff = 2.717616e-05
iter = 47
LL = -1636.0445053
diff = 2.216017e-05
iter = 48
LL = -1636.0444872
diff = 1.810807e-05
iter = 49
LL = -1636.0444723
diff = 1.483008e-05
iter = 50
LL = -1636.0444601
diff = 1.217437e-05
iter = 51
LL = -1636.0444501
diff = 1.001925e-05
iter = 52
LL = -1636.0444419
diff = 8.267322e-06
> # check parameter estimates
> print(summary(y))
$delta
[1] 0 1
$Pi
[,1] [,2]
[1,] 0.9025108 9.748919e-02
[2,] 0.9999988 1.188985e-06
$nonstat
[1] TRUE
$distn
[1] "norm"
$pm
$pm$sd
[1] 1.041785 3.001119
$discrete
[1] FALSE
$n
[1] 1000
> print(sum(y$delta))
[1] 1
> print(y$Pi %*% rep(1, ncol(y$Pi)))
[,1]
[1,] 1
[2,] 1
> plot(1:n, x$x, type="l", xlab="Time", ylab="")
> points((1:n)[x$y==2], x$x[x$y==2], pch=15, col="red")
> title(sub="Those marked in red are in state 2")
> states <- Viterbi(y)
> wrong <- (states!=x$y)
> print(table(x$y, wrong))
wrong
FALSE TRUE
1 887 5
2 31 77
> par(mfrow=c(1, 2))
> s1 <- hist(x$x[x$y==1], col="gray70", xlab="",
+ main="Observed X Values When in State 1")
> hist(x$x[x$y==1 & wrong], breaks=s1$breaks, add=TRUE, col="red")
> title(sub="Red denotes the number of incorrectly determined states by Viterbi")
> box()
> s2 <- hist(x$x[x$y==2], col="gray70", xlab="",
+ main="Observed X Values When in State 2")
> hist(x$x[x$y==2 & wrong], breaks=s2$breaks, add=TRUE, col="red")
> title(sub="Red denotes the number of incorrectly determined states by Viterbi")
> box()
> par(mfrow=c(1, 1))
> if (interactive()) par(par.default)
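The table above counts, state by state, the time points where the Viterbi path disagrees with the simulated chain. As a one-line illustrative summary not printed by the demo, the overall misclassification rate is:

print(mean(states != x$y))   # proportion of time points assigned to the wrong state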