R/03b.compLTP_continuous.R

Defines functions compLTP_continuous

#LTP: likelihood, tree probability, posterior
compLTP_continuous=function(ET,base,power){
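  # Arguments (field layout inferred from how they are used below):
  #   ET    : tree object (list) containing
  #           y, z                      - response vector and marker matrix
  #           terminal, internal        - terminal/internal node indices (binary heap numbering)
  #           numNodes                  - total number of nodes
  #           node.hat                  - terminal-node assignment for each observation
  #           marker                    - marker column selected at each terminal node
  #           dir.marker, dir.predictor - marker / split-variable selection probabilities
  #           splitVariable             - predictor used for the split at each internal node
  #           eta                       - cutoff selection probability at each internal node
  #   base, power : hyperparameters of the splitting prior, passed to prior.tree()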

  ###################################################
  ###1. log likelihood
  ###################################################
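  # Each terminal node j fits a Bayesian simple linear regression of y on its
  # selected marker, y = beta0 + beta1*z + e with e ~ N(0, sigma^2), under a
  # conjugate normal-inverse-gamma prior. The node's contribution to the log
  # likelihood is the sum of pointwise posterior predictive log densities.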
  loglik=0
  bhat=list()                       # posterior mean of (intercept, slope) per terminal node
  yhat=rep(NA_real_, length(ET$y))  # posterior predictive mean per observation
  for(j in ET$terminal){
    wh.j=which(ET$node.hat==j) # idx for subgroup j
    y.j=ET$y[wh.j]             # y for subgroup j
    z.j=ET$z[wh.j,ET$marker[j]]# selected marker for subgroup j
    depth.j=floor(log2(j))+1
    
    Z=cbind(1, z.j)            # Design matrix
    
    # Prior parameters
    beta_0=c(0, 0)             # Prior mean of beta (intercept and slope)
    Sigma_0=diag(2) * 10       # Prior covariance matrix for beta
    alpha_0=2                  # Shape parameter for inverse gamma prior
    beta_0_sigma=2             # Scale parameter for inverse gamma prior
    
    # Compute posterior parameters
    n=length(y.j)
    
    # Sufficient statistics
    ZtZ=t(Z) %*% Z
    Zty=t(Z) %*% y.j
    
    # Posterior covariance and mean for beta
    Sigma_n=solve(solve(Sigma_0) + ZtZ)
    mu_n=Sigma_n %*% (solve(Sigma_0) %*% beta_0 + Zty)
    
    # Posterior parameters for sigma^2 (conjugate normal-inverse-gamma update):
    # beta_n = beta_0_sigma + (1/2)*[(y - Z mu_n)'(y - Z mu_n)
    #                                + (mu_n - beta_0)' Sigma_0^{-1} (mu_n - beta_0)]
    alpha_n=alpha_0 + n / 2
    residuals=y.j - Z %*% mu_n
    beta_n=beta_0_sigma + 0.5 * (t(residuals) %*% residuals +
                                   t(mu_n - beta_0) %*% solve(Sigma_0) %*% (mu_n - beta_0))
    
    # Posterior predictive density: y_i | data ~ Student-t with df = 2*alpha_n,
    # location z_i' mu_n, and squared scale (beta_n/alpha_n)*(1 + z_i' Sigma_n z_i)
    df = 2 * alpha_n  # Degrees of freedom
    log_likelihood_sum=0
    for (i in 1:n) {
      z.j_star = Z[i, ]             # Row of the design matrix
      mean_pred = as.numeric(t(z.j_star) %*% mu_n)
      pred_variance = as.numeric((beta_n / alpha_n) * (1 + t(z.j_star) %*% Sigma_n %*% z.j_star))
      scale_pred = sqrt(pred_variance)
      
      # Log density of the location-scale t: standard t density at the
      # standardized residual, minus log(scale_pred) for the change of variables
      log_likelihood = dt((y.j[i] - mean_pred) / scale_pred, df, log = TRUE) - log(scale_pred)
      log_likelihood_sum = log_likelihood_sum + log_likelihood
    }
    loglik = loglik + log_likelihood_sum
    # Posterior predictive mean for each observation in this node
    yhat[wh.j]=as.numeric(Z %*% mu_n)
    bhat[[j]]=mu_n
  }
  ET$bhat=bhat
  ET$loglik=loglik
  ET$yhat=yhat

  ###################################################
  ##2 log tree prob
  ###################################################
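  # The tree prior factorizes over nodes: a terminal node at depth d contributes
  # (1 - p.split(d)) times the selection probability of its marker; an internal
  # node contributes p.split(d) times the selection probabilities of its split
  # variable and cutoff. Here p.split(d) = prior.tree(base, power, d), defined
  # elsewhere in the package (presumably the BART-style base*(1+d)^(-power),
  # though that definition is not shown in this file).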
  logTreeProb=0
  n.terminal=length(ET$terminal) # number of terminal nodes
  for(m in ET$terminal){ #each terminal node
    depth=floor(log2(m))
    logTreeProb = logTreeProb+log(1-prior.tree(base,power,depth))+log(ET$dir.marker[ET$marker[m]])
                              #1-splitting prob                  # marker sel prob
  }

  if(ET$numNodes>1){ #each internal node
    for(m in ET$internal){
      depth=floor(log2(m))
      logTreeProb=logTreeProb+log(prior.tree(base,power,depth))+log(ET$dir.predictor[ET$splitVariable[m]])+log(ET$eta[m])
                              #splitting prob                   # feature sel prob                         #cutoff sel prob
      #eta[m] is the cutoff selection probability at internal node m,
      #i.e., one over the number of unique values of the selected predictor
    }
  }
  ET$logTreeProb=logTreeProb

  ###################################################
  ###3. log posterior
  ###################################################
  ET$logPosterior=loglik+logTreeProb #proportional to the posterior, since the normalizing constant is ignored

  return(ET)
}
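
A minimal usage sketch, not part of the package: it builds a toy tree in which root node 1 splits into terminal nodes 2 and 3, and supplies a stand-in prior.tree with the assumed BART-style form base*(1+depth)^(-power). The field layout of ET is inferred from the function above; the packaged prior.tree may differ.

# Stand-in split prior (assumption: BART-style; btrm's own prior.tree may differ)
prior.tree=function(base,power,depth) base*(1+depth)^(-power)

set.seed(1)
n=40
z=matrix(rnorm(n),ncol=1)                  # one marker
x=runif(n)                                 # one splitting predictor
node.hat=ifelse(x<0.5, 2L, 3L)             # root (node 1) sends obs to nodes 2 and 3
y=ifelse(node.hat==2L, 1+2*z[,1], -1-z[,1])+rnorm(n,sd=0.3)

ET=list(y=y, z=z,
        numNodes=3, internal=1L, terminal=c(2L,3L),
        node.hat=node.hat,
        marker=c(NA,1L,1L),                # marker chosen at each terminal node
        dir.marker=1,                      # single marker: selection prob 1
        splitVariable=1L,                  # root splits on predictor 1
        dir.predictor=1,                   # single predictor: selection prob 1
        eta=0.05)                          # cutoff selection prob at the root

ET=compLTP_continuous(ET, base=0.95, power=2)
c(ET$loglik, ET$logTreeProb, ET$logPosterior)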
