Adaptive Least Squares

Description

Adaptive Least Squares, especially for large spatio-temporal data.

Usage

H.als.b(Z, Hs, Ht, Hst.ls, rho, reg, b.lag = -1, Hs0 = NULL, Ht0 = NULL, Hst0.ls = NULL)

Arguments

Z

Space-time data. A τ x n numeric matrix.

Hs

Spatial covariates (of supporting sites). An n x p_s numeric matrix.

Ht

Temporal covariates (of supporting sites). A τ x p_t numeric matrix.

Hst.ls

Space-time covariates (of supporting sites). A list of length τ; each element should be an n x p_st numeric matrix.

rho

ALS signal-to-noise ratio (SNR). A non-negative scalar.

reg

ALS regularizer. A non-negative scalar.

b.lag

ALS lag. A scalar integer, typically -1 (a priori) or 0 (a posteriori).

Hs0

Spatial covariates (of interpolation sites). An n* x p_s matrix, or NULL.

Ht0

Temporal covariates (of interpolation sites). A τ x p_t matrix, or NULL. If not NULL, this should virtually always be Ht itself.

Hst0.ls

Space-time covariates (of interpolation sites). A list of length τ; each element should be an n* x p_st numeric matrix.
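
A minimal call needs only Z, Hst.ls, rho, and reg: as the definition below shows, Hs and Ht may be NULL, while Hst.ls must be supplied. A small sketch (the object names and sizes here, tau2, n2, Z2, Hst2.ls, are made up for illustration and are not part of this page):

tau2 <- 30 ; n2 <- 5
Z2 <- matrix(rnorm(tau2 * n2), tau2, n2)                              ## toy space-time data
Hst2.ls <- lapply(1:tau2, function(i) matrix(rnorm(n2 * 3), n2, 3))   ## p_st = 3
fit <- H.als.b(Z = Z2, Hs = NULL, Ht = NULL, Hst.ls = Hst2.ls, 
    rho = 0.1, reg = 0.1, b.lag = -1)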

Value

A named list.

Z.hat

A τ x n matrix, the ith row of which is the ALS prediction for the supporting sites at time i.

B

A τ x (p_s+p_t+p_st) matrix, the ith row of which is the ALS state (partial slopes) prediction at time i.

Z0.hat

A τ x n* matrix, the ith row of which is the ALS prediction for the interpolation sites at time i.

inv.LHH

A (p_s+p_t+p_st) x (p_s+p_t+p_st) matrix. This is the (ALS predicted) covariate precision matrix at time τ.

ALS.g

The ALS gain at time τ.
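
Continuing the small sketch above (fit, tau2, n2, and the 3-column Hst2.ls are illustrative assumptions, not objects defined by this page), the shapes of the returned components can be checked directly:

dim(fit$Z.hat)      ## tau2 x n2
dim(fit$B)          ## tau2 x 3   (p_s + p_t + p_st = 0 + 0 + 3)
dim(fit$inv.LHH)    ## 3 x 3
fit$ALS.g           ## scalar gain at time tau2
fit$Z0.hat          ## NULL here, since no interpolation covariates were supplied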

Examples

set.seed(99999)

library(SSsimple)

tau <- 70        ## number of time points
n.all <- 14      ## total number of sites (supporting + interpolation)

Hs.all <- matrix(rnorm(n.all), nrow=n.all)    ## spatial covariates (p_s = 1)
Ht <- matrix(rnorm(tau*2), nrow=tau)          ## temporal covariates (p_t = 2)
Hst.ls.all <- list()                          ## space-time covariates (p_st = 2)
for(i in 1:tau) { Hst.ls.all[[i]] <- matrix(rnorm(n.all*2), nrow=n.all) }

## combine all covariate types into a single list for simulation
Hst.combined <- list()
for(i in 1:tau) { 
    Hst.combined[[i]] <- cbind( Hs.all, 
        matrix(Ht[i, ], nrow=n.all, ncol=ncol(Ht), byrow=TRUE), 
        Hst.ls.all[[i]] ) 
}

######## use SSsimple to simulate
sssim.obj <- SS.sim.tv( 0.999, Hst.combined, 0.01, diag(1, n.all), tau )


## split the simulated sites into supporting and interpolation sets
ndx.support <- 1:10
ndx.interp <- 11:14

Z.all <- sssim.obj$Z
Z <- Z.all[ , ndx.support]
Z0 <- Z.all[ , ndx.interp]

Hst.ls <- subsetsites.Hst.ls(Hst.ls.all, ndx.support)
Hst0.ls <- subsetsites.Hst.ls(Hst.ls.all, ndx.interp)

Hs <- Hs.all[ ndx.support, , drop=FALSE]
Hs0 <- Hs.all[ ndx.interp, , drop=FALSE]

xrho <- 1/10    ## ALS signal-to-noise ratio
xreg <- 1/10    ## ALS regularizer
xALS <- H.als.b(Z=Z, Hs=Hs, Ht=Ht, Hst.ls=Hst.ls, rho=xrho, reg=xreg, b.lag=-1, 
Hs0=Hs0, Ht0=Ht, Hst0.ls=Hst0.ls) 



test.rng <- 20:tau    ## evaluate after an initial burn-in period

######## interpolation RMSE over the test range
errs.sq <- (Z0 - xALS$Z0.hat)^2
sqrt( mean(errs.sq[test.rng, ]) )
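
## (Illustrative addition, not part of the original example.)  Refitting with
## b.lag=0 gives the a-posteriori (concurrent) fit; its interpolation RMSE can
## be compared with the a-priori fit above.
xALS.post <- H.als.b(Z=Z, Hs=Hs, Ht=Ht, Hst.ls=Hst.ls, rho=xrho, reg=xreg, 
b.lag=0, Hs0=Hs0, Ht0=Ht, Hst0.ls=Hst0.ls) 
sqrt( mean( (Z0[test.rng, ] - xALS.post$Z0.hat[test.rng, ])^2 ) )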


################ calculate the 'effective standard errors' (actually 'effective prediction
################ errors') of the ALS partial slopes
rmse <- sqrt(mean((Z[test.rng, ] - xALS$Z.hat[test.rng, ])^2))
rmse
als.se <- rmse * sqrt(xALS$ALS.g) * sqrt(diag(xALS$inv.LHH))
cbind(xALS$B[tau, ], als.se, xALS$B[tau, ]/als.se)
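
## (Illustrative check, not part of the original example.)  With b.lag=-1 the
## prediction at time i uses the state estimated at time i-1, so no a-priori
## prediction exists at time 1 and the first rows of Z.hat and Z0.hat are NA.
any( is.na(xALS$Z.hat[1, ]) )
all( !is.na(xALS$Z.hat[2:tau, ]) )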


## The function is currently defined as
function (Z, Hs, Ht, Hst.ls, rho, reg, b.lag = -1, Hs0 = NULL, 
    Ht0 = NULL, Hst0.ls = NULL) 
{
    tau <- nrow(Z)
    n <- ncol(Z)
    Z.als <- matrix(NA, tau, n)
    if (is.null(Hs)) {
        use.Hs <- FALSE
        d.s <- 0
    }
    else {
        use.Hs <- TRUE
        d.s <- ncol(Hs)
    }
    if (is.null(Ht)) {
        use.Ht <- FALSE
        d.t <- 0
    }
    else {
        use.Ht <- TRUE
        d.t <- ncol(Ht)
    }
    if (is.null(Hst.ls)) {
        use.Hst.ls <- FALSE
        d.st <- 0
    }
    else {
        use.Hst.ls <- TRUE
        d.st <- ncol(Hst.ls[[1]])
    }
    d <- d.s + d.t + d.st
    B <- matrix(0, tau, d)
    reg.mx <- diag(reg, d)
    LHH <- 0
    LHy <- 0
    g <- 1
    use.H0 <- !is.null(Hs0) | !is.null(Ht0) | !is.null(Hst0.ls)
    if (use.H0) {
        if (!is.null(Hs0)) {
            n0 <- nrow(Hs0)
            Z.als.0 <- matrix(NA, tau, n0)
        }
        else {
            n0 <- nrow(Hst0.ls[[1]])
        }
    }
    low.ndx <- max(1, 1 - b.lag)
    top.ndx <- min(tau, tau - b.lag)
    for (i in low.ndx:top.ndx) {
        if (use.Ht) {
            this.Ht.mx <- matrix(Ht[i, ], n, d.t, byrow = TRUE)
        }
        else {
            this.Ht.mx <- NULL
        }
        this.H <- cbind(Hs, this.Ht.mx, Hst.ls[[i]])
        if (use.H0) {
            if (use.Ht) {
                this.Ht0.mx <- matrix(Ht0[i, ], n0, d.t, byrow = TRUE)
            }
            else {
                this.Ht0.mx <- NULL
            }
            this.H0 <- cbind(Hs0, this.Ht0.mx, Hst0.ls[[i]])
        }
        this.HH <- crossprod(this.H)
        this.Hy <- crossprod(this.H, Z[i, ])
        LHH <- LHH + g * (this.HH - LHH)
        LHy <- LHy + g * (this.Hy - LHy)
        inv.LHH <- try(solve(LHH + reg.mx), silent = TRUE)
        if (class(inv.LHH) != "try-error") {
            inv.LHH <- inv.LHH
        }
        else {
            inv.LHH <- matrix(0, d, d)
        }
        B[i, ] <- inv.LHH %*% LHy
        Z.als[i, ] <- B[i + b.lag, ] %*% t(this.H)
        if (use.H0) {
            Z.als.0[i, ] <- B[i + b.lag, ] %*% t(this.H0)
        }
        else {
            Z.als.0 <- NULL
        }
        g <- (g + rho)/(g + rho + 1)
    }
    return(list(Z.hat = Z.als, B = B, Z0.hat = Z.als.0, inv.LHH = inv.LHH, 
        ALS.g = g))
  }