#' malt
#'
#' @description Implements the sampling algorithm: Metropolis Adjusted Langevin Trajectories (MALT) as described in Riou-Durand and Vogrinc (2022).
#'
#' @param init Real vector. Initial values for the sampling algorithm.
#' @param U A potential function to return the log-density of the distribution to be sampled from, up to an additive constant. It should input a real vector of the same length as \code{init} and output a scalar.
#' @param grad A function to return the gradient of the potential. It should input and output a real vector of the same length as \code{init}.
#' @param n The number of samples to be generated. Positive integer.
#' @param g The friction, a.k.a damping parameter. Non-negative real number. The choice g=0 boils down to Hamiltonian Monte Carlo.
#' @param h The time step. Positive real number.
#' @param L The number of steps per trajectory. Positive integer. The choice L=1 boils down to the Metropolis Adjusted Langevin Algorithm.
#' @param warm Should the chain be warmed up? Logical. If TRUE, the samples are generated after a warm-up phase of n successive trajectories. The first half of the warm-up phase consists of unadjusted trajectories.
#'
#' @details Generates approximate samples from a distribution with density \deqn{\Pi(x)\propto e^{-U(x)}}{Pi(x)=exp(-U(x))/C} A Markov chain is generated by drawing successive Langevin trajectories. Each trajectory starts from a fresh Gaussian velocity and is faced with an accept-reject test known as Metropolis adjustment. The Hamiltonian Monte Carlo (HMC) algorithm is recovered as a particular case when the damping parameter is set to zero. A positive choice of damping can ensure robustness of tuning, see Riou-Durand and Vogrinc (2022) for further details.
#'
#' @references Riou-Durand and Vogrinc (2022). Available at: https://arxiv.org/abs/2202.13230.
#'
#' @seealso \code{\link{trajectory}}, which draws a Langevin trajectory and computes the numerical error along the path.
#'
#' @return Returns a list with the following objects:
#' \item{samples}{a matrix whose rows are the samples generated.}
#' \item{draw}{a vector corresponding to the last draw of the chain.}
#' \item{accept}{the acceptance rate of the chain. An acceptance rate close to zero/one indicates that the time step chosen is respectively too large/small. Optimally, the time step should be tuned to obtain an acceptance rate slightly above 65%.}
#' \item{param}{the input parameters of the malt algorithm.}
#' @importFrom stats rnorm rexp
#' @export
#'
#' @examples
#' d=50
#' sigma=((d:1)/d)^(1/2)
#' init=rnorm(d)*sigma
#' U=function(x){sum(0.5*x^2/sigma^2)}
#' grad=function(x){x/sigma^2}
#' n=10^4
#' g=1.5
#' h=0.20
#' L=8
#' malt(init,U,grad,n,g,h,L)
malt=function(init,U,grad,n,g,h,L,warm=FALSE){
# Metropolis Adjusted Langevin Trajectories (Riou-Durand & Vogrinc, 2022).
# Returns a list with: samples (n x d matrix), draw (last state),
# accept (acceptance rate), param (g, h, L).
d=length(init)
if(d>=2){}else{stop("length(init) should be greater or equal than 2.")}
# n must be at least 2: the sampling loops run over 1:(n-1) and index row i+1,
# so n=1 would crash with an out-of-bounds subscript.
if(n>=2){n=floor(n)}else{warning("The number of samples should be an integer greater than 1, setting n=10000 instead.");n=10^4}
if(g>=0){g=as.numeric(g)}else{warning("The friction/damping should be a non-negative real number, setting g=0 instead.");g=0}
if(h>0){h=as.numeric(h)}else{stop("The time step should be a positive real number.")}
if(L>=1){L=floor(L)}else{warning("The number of steps should be a positive integer, setting L=1 instead.");L=1}
# Precomputed constants of the OBABO-type integrator:
# eta/zeta: partial velocity refreshment weights for damping g over step h.
eta=exp(-g*h)
zeta=sqrt(1-eta^2)
half=h/2        # leapfrog half step
small=h^2/8     # h^2/8 correction term entering the Metropolis ratio
steps=L+1       # index of the Gaussian draw used as the fresh initial velocity
chain=matrix(0,nrow=n,ncol=d)
# BUG FIX: start the chain at `init`. Previously chain[1,] stayed at the zero
# vector when warm=FALSE, so the documented initial value was silently ignored.
chain[1,]=init
malt_x=init
#warm up (doubles the computational time): draw unadjusted trajectories for n/2 iterations then adjusted trajectories for n/2 iterations.
if(isTRUE(warm)){
x=malt_x
grad_x=grad(x);grad_malt_x=grad_x
gradsq_x=sum(grad_x^2);gradsq_malt_x=gradsq_x
U_x=U(x);U_malt_x=U_x
# Pre-draw all randomness: n*(L+1) Gaussian vectors (L refreshments + 1 fresh
# velocity per trajectory) and n Exp(1) draws for the Metropolis tests.
norm_draws=array(stats::rnorm(n*d*(L+1)),dim=c(n,d,L+1))
expo_draws=stats::rexp(n)
for(i in 1:(n-1)){
v=norm_draws[i,,steps]
Delta=0
for(j in 1:L){
# One leapfrog step followed by a partial velocity refreshment (OBABO scheme).
v=v-half*grad_x
x=x+h*v
grad_y=grad(x)
# Accumulate the energy error along the trajectory.
Delta=Delta-half*sum(v*(grad_x+grad_y))
v=v-half*grad_y
v=eta*v+zeta*norm_draws[i,,j]
grad_x=grad_y
}
gradsq_x=sum(grad_x^2)
U_x=U(x)
Delta=small*(gradsq_x-gradsq_malt_x)+U_x-U_malt_x+Delta
# First half of the warm-up is unadjusted: force acceptance by zeroing Delta.
if(i<n/2){Delta=0}
# Metropolis test: reject iff Exp(1) draw < Delta; on rejection restore the
# previous state and its cached potential/gradient.
if(expo_draws[i]<Delta){x=chain[i,];grad_x=grad_malt_x;gradsq_x=gradsq_malt_x;U_x=U_malt_x}else{grad_malt_x=grad_x;gradsq_malt_x=gradsq_x;U_malt_x=U_x}
chain[i+1,]=x
}
# Warm-up done: restart the adjusted phase from the last warmed-up state.
chain[1,]=chain[n,]
}
# Sampling phase: same trajectory loop as above, always Metropolis adjusted.
x=chain[1,]
grad_x=grad(x);grad_malt_x=grad_x
gradsq_x=sum(grad_x^2);gradsq_malt_x=gradsq_x
U_x=U(x);U_malt_x=U_x
norm_draws=array(stats::rnorm(n*d*(L+1)),dim=c(n,d,L+1))
expo_draws=stats::rexp(n)
for(i in 1:(n-1)){
v=norm_draws[i,,steps]
Delta=0
for(j in 1:L){
v=v-half*grad_x
x=x+h*v
grad_y=grad(x)
Delta=Delta-half*sum(v*(grad_x+grad_y))
v=v-half*grad_y
v=eta*v+zeta*norm_draws[i,,j]
grad_x=grad_y
}
gradsq_x=sum(grad_x^2)
U_x=U(x)
Delta=small*(gradsq_x-gradsq_malt_x)+U_x-U_malt_x+Delta
if(expo_draws[i]<Delta){x=chain[i,];grad_x=grad_malt_x;gradsq_x=gradsq_malt_x;U_x=U_malt_x}else{grad_malt_x=grad_x;gradsq_malt_x=gradsq_x;U_malt_x=U_x}
chain[i+1,]=x
}
# Acceptance rate estimated from moves of the first coordinate.
output=list(samples=chain, draw=chain[n,], accept=mean(chain[2:n,1]!=chain[1:(n-1),1]),param=list(g=g,h=h,L=L))
class(output)=c("malt")
return(output)
}