# GP inference
#' Draw from a conditioned MVN
#'
#' Draws a proposal for one end of the log intensity from a multivariate normal
#' conditioned on the trailing values being fixed at their current values.
#'
#' @param times Time grid on which the MVN is defined
#' @param x Current log intensity values on `times`
#' @param sigma.var Signal variance of the squared exponential covariance
#' @param l Length scale of the squared exponential covariance
#' @param pos Which end of `x` to update: 'start' (default) or 'end'
#' @param noise Noise variance added to the diagonal of the covariance
#' @param do.log Currently unused
#' @param known Number of trailing values to hold fixed (known + 1 points are conditioned on)
#' @param f Choice of conditioning mean: 'current', 'mean', 'min' or 'combined'
#'
#' @return A list with the draw, the log proposal ratio and the conditional means mu.cur and mu.can
#'
#' @examples
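#' # A minimal sketch (not run): propose new values for the start of a log
#' # intensity, conditioning on its last 11 values; uses covariance() from
#' # this file.
#' \dontrun{
#' times <- seq(0, 10, length.out = 51)
#' x <- log(rep(5, 51))
#' prop <- draw_conditionedMVN(times, x, sigma.var = 10, l = 2, noise = 1e-6, f = 'min')
#' prop$draw; prop$proposal.ratio
#' }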
draw_conditionedMVN <- function(times, x, sigma.var, l, pos = 'start', noise = 0.001, do.log = TRUE, known = 10, f = 'current'){
if(pos == 'end'){
x <- rev(x)
}
# scale everything to reduce numerical issues on the MVN
scale <- 1
times <- times*scale
l <- l*scale
sigma.var <- sigma.var*scale
x <- x*scale
noise <- noise*scale
known <- (length(x)- known):length(x)
# Get covariance matrix on times.
Sigma <- matrix(NA, ncol = length(times), nrow = length(times))
for(i in 1:length(times)){
for(j in 1:length(times)){
Sigma[i,j] <- covariance(times[i], times[j], sigma_f2 = sigma.var, sigma_v2 = noise, l = l)
}
}
# Split into known and unknown parts
sigma.22 <- Sigma[known,known]
sigma.12 <- Sigma[(1:length(x))[-known],known]
sigma.21 <- Sigma[known,(1:length(x))[-known]]
sigma.11 <- Sigma[-known,-known]
# create the conditioned mean, mu.cur, depending on choice of f and current x, x.
if (f =='current'){
mu.cur <- x[-known]
}
else if( f == 'mean'){
new.mu <- rep(mean(x), length(x))
mu.cur <- new.mu[-known] + sigma.12 %*% solve(sigma.22) %*% (x[known] - new.mu[known])
}
else if( f == 'min'){
new.mu <- rep(min(x), length(x))
mu.cur <- new.mu[-known] + sigma.12 %*% solve(sigma.22) %*% (x[known] - new.mu[known])
}
else if(f =='combined'){
new.mu <- colMeans(rbind(rep(min(x), length(x)), x))
mu.cur <-new.mu[-known] + sigma.12 %*% solve(sigma.22) %*% (x[known] - new.mu[known])
}
else{
stop('Invalid choice of f!')
}
conditioned.sigma <- sigma.11 - sigma.12 %*% solve(sigma.22) %*% t(sigma.12)
# Draw from the MVN with mean and sigma generated by the conditioned mean and covariance.
# We set checkSymmetry = F since floating point error can leave conditioned.sigma slightly asymmetric.
draw <- mvtnorm::rmvnorm(n=1, mean = mu.cur, sigma = conditioned.sigma, method = 'chol', checkSymmetry = F)
draw <-c(draw,x[known]) # Adjoin the conditioned values on the end.
draw <- draw/scale
# Now we want to calculate the proposal ratio.
# Calculate mu.can the mean of the MVN for proposing x.cur.
if (f =='current'){
mu.can <- draw[-known]
}
else if( f == 'mean'){
new.mu <- rep(mean(draw), length(x))
mu.can <- new.mu[-known] + sigma.12 %*% solve(sigma.22) %*% (x[known] - new.mu[known])
}
else if( f == 'min'){
new.mu <- rep(min(draw), length(x))
mu.can <- new.mu[-known] + sigma.12 %*% solve(sigma.22) %*% (x[known] - new.mu[known])
}
else if(f =='combined'){
new.mu <- colMeans(rbind(rep(min(draw), length(x)), draw))
mu.can <-new.mu[-known] + sigma.12 %*% solve(sigma.22) %*% (x[known] - new.mu[known])
}
else{
stop('Invalid choice of f!')
}
# Calculate the inverse of the covariance
i.conditioned.sigma <- solve(conditioned.sigma)
proposal.ratio <- -0.5*t((draw[-known] - mu.cur)) %*% i.conditioned.sigma %*% (draw[-known] - mu.cur) + 0.5* t((x[-known] - mu.can)) %*% i.conditioned.sigma %*% (x[-known] - mu.can)
if(pos == 'end'){
draw <- rev(draw)
}
return(list(draw = draw, proposal.ratio = proposal.ratio, mu.cur = mu.cur, mu.can = mu.can))
}
#' Squared exponential covariance function
#'
#' @param t Time difference
#' @param sigma Signal Variance
#' @param l Length Scale
#'
#' @return The Covariance
#' @export
#'
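#' @examples
#' # Quick checks of the defaults:
#' cov.fn(0)             # the signal variance, sigma^2 = 1000
#' cov.fn(3) / cov.fn(0) # exp(-1/2) one length scale apart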
cov.fn <- function(t, sigma = sqrt(1000), l = 3) {
sigma^2 * exp(-t^2 / (2 * l^2))
}
#' Cumulative Simpson's rule
#'
#' @param x Vector of sample points (may be non-uniformly spaced)
#' @param y Function values at `x`
#'
#' @return The cumulative integral of `y` with respect to `x`
#' @export
#'
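#' @examples
#' # Sanity check against a known antiderivative: integrating cos recovers sin.
#' t <- seq(0, pi, length.out = 101)
#' max(abs(cumsimpson(t, cos(t)) - sin(t))) # small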
cumsimpson <- function(x, y) {
dx <- diff(x)
dy <- diff(y)
dx1 <- dx[-length(dx)]
dx2 <- dx[-1L]
dxs <- dx1 + dx2
dy1 <- dy[-length(dy)]
dy2 <- dy[-1L]
a <- (dy2 / (dx2 * dxs) - dy1 / (dx1 * dxs)) / 3 # quadratic coefficient of the local three-point fit
b <- (dy2 * dx1 / (dx2 * dxs) + dy1 * dx2 / (dx1 * dxs)) / 2
c <- y[-c(1L, length(y))]
i1 <- ((a * dx1 - b) * dx1 + c) * dx1 # left half integral
i2 <- ((a * dx2 + b) * dx2 + c) * dx2 # right half integral
z <- vector("numeric", length(y))
z[seq(2, length(z) - 1, 2)] <- i1[seq(1, length(i1), 2)]
z[seq(3, length(z), 2)] <- i2[seq(1, length(i2), 2)]
z[length(z)] <- i2[length(i2)]
cumsum(z)
}
#' Simulate a Gaussian Process using Spectral decomposition
#'
#' @param cov.fn The covariance function
#' @param l the length scale
#' @param dt.GP The step size
#' @param steps Number of steps required
#'
#' @return A draw from the Gaussian process
#' @export
#'
#' @examples
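#' # A sketch (not run): draw a stationary GP path with the default squared
#' # exponential covariance; its sample variance should be roughly cov.fn(0).
#' \dontrun{
#' draw <- stat_gp(cov.fn, l = 3, dt.GP = 0.5, steps = 1000)
#' stats::var(draw$GP)
#' draw$dt
#' }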
stat_gp <- function(cov.fn, l, dt.GP = 0.8, steps = 1000) {
## Generates a GP by choosing dt in the spectral representation. This version
## unties the variables used for the spectrum computation from those used for
## generating the GP.
## (The original MATLAB implementation used fsolve with TolFun = 1e-8; here
## pracma::fzero plays that role.)
## Compute the spectrum and the spectral cut-off frequency.
Nt_cor <- 100
dt <- l / Nt_cor
eps.cov <- 1e-4
Tmax <- pracma::fzero(function(t) cov.fn(t, l = l) - eps.cov,
5 * l, tol = 1e-8)
Tmax <- ceiling(Tmax$x / dt) * dt
Nt <- Tmax / dt
ind_0 <- Nt + 1
k <- -Nt:(Nt - 1)
tcov <- k * dt
cov.t <- cov.fn(tcov,l=l)
freq <- k / (2 * Tmax)
w <- 2 * pi * freq
dw <- 2 * pi / (2 * Tmax)
S <- abs(pracma::fftshift(stats::fft(cov.t)) * Tmax / Nt) / (2 * pi)
Int_S <- cumsimpson(w, S)
## Test for convergence of spectral integral
rel_error_Int_S <- (Int_S[(2*Nt - 10L):(2*Nt)] -
Int_S[(2*Nt - 11L):(2*Nt - 1L)]) /
Int_S[(2*Nt - 10L):(2*Nt)]
eps_Int_S <- 1e-3
if (sum(abs(rel_error_Int_S) > eps_Int_S) > 0) {
stop("w-range for spectrum too small. Increase Tmax. (eps_Int_S = ",
eps_Int_S, ").")
}
eps_cutoff <- 1e-3
ind_wc <- which(Int_S > (1 - eps_cutoff) * Int_S[2*Nt])[1]
Nw <- ind_wc - ind_0
wc <- Nw * dw
## Generate the stochastic process with dt close to the initial guess dt.GP.
# Nw <- max(ceiling((steps+1)*dt.GP*wc/(2*pi)),2)
# print(Nw)
# Nw <- Nw.input
Nw <- 128 # fixed number of spectral components, overriding the adaptive choice above
dw2 <- wc/Nw
T0 <- 2 * pi / dw2
Mw <- floor(T0 / dt.GP)
dt <- T0 / Mw
# # First fix N and find the corresponding M value
# M <- (2*pi*Nw)/(wc*dt.GP) ;M
# # best M for computation larger than M
# M.new <- min(2^(floor(log((2*pi*Nw)/(wc*dt.GP),base=2)))
#
# ; M.new
# wc.new <- (2*pi*Nw)/(M.new*dt.GP) ;wc.new
#
# Mw <- M.new ; wc <- wc.new ; dt <- dt.GP
# dw2 <- wc/Nw
# T0 <- 2 * pi / dw2
if (dt > pi / wc)
stop('Reduce dt to be consistent with cut-off frequency wc')
## Recompute the spectrum with the new spectral discretisation.
dts <- 2 * pi / (2 * wc)
cov_t <- cov.fn((-Nw:(Nw - 1)) * dts, l=l)
S2 <- abs((stats::fft(cov_t) * dts)) / (2 * pi)
w2 <- (-Nw:(Nw - 1)) * dw2
A <- vector("numeric", Mw)
A[1:Nw] <- sqrt(2 * S2[1:Nw] * dw2)
A[1] <- 0
## The original implementation optionally set an RNG seed; skipped here.
# if (!is.null(rng_seed)) set.seed(rng_seed)
phi <- vector("numeric", Mw)
phi[1:Nw] <- 2 * pi * stats::runif(Nw)
B <- sqrt(2) * A * exp(complex(real = 0, imaginary = 1) * phi)
GP <- Mw * Re(stats::fft(B, inverse = TRUE) / length(B))
return(list("GP"=GP[1:(steps+1)],"dt"=dt))
}
#' Kronecker delta function
#'
#' @param x First value
#' @param y Second value
#'
#' @return 1 if `x == y`, otherwise 0
#' @export
#'
#' @examples
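#' delta(1, 1) # 1
#' delta(1, 2) # 0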
delta<-function(x=0,y=0){
if (x==y){
return(1)
} else {
return(0)
}
}
#' Calculate the covariance of two given points.
#'
#' @param x First point
#' @param y Second point
#' @param sigma_f2 signal variance
#' @param sigma_v2 noise variance
#' @param l length scale
#'
#' @return covariance
#' @export
#'
#' @examples
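#' # Squared exponential plus a noise term via the Kronecker delta:
#' covariance(0, 0, sigma_f2 = 1000, sigma_v2 = 1e-5, l = 3) # sigma_f2 + sigma_v2 on the diagonal
#' covariance(0, 3, sigma_f2 = 1000, sigma_v2 = 1e-5, l = 3) # 1000 * exp(-1/2) off the diagonal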
covariance <- function(x, y, sigma_f2, sigma_v2, l){
cov <- sigma_f2 * exp(-(x - y)^2 / (2 * l^2)) + sigma_v2 * delta(x, y)
return(cov)
}
#' Function that returns the covariance matrix Sigma.
#'
#' @param tStop end time
#' @param l length scale
#' @param sigma_f2 signal variance
#' @param sigma_v2 noise variance
#' @param steps Number of grid intervals (the grid has steps + 1 points)
#'
#' @return Covariance matrix
#' @export
#'
#' @examples
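#' # A small sketch: build the prior covariance on an 11-point grid and draw
#' # one GP prior path (assumes the mvtnorm package is available).
#' \dontrun{
#' Sigma <- GetSigma(tStop = 20, l = 3, sigma_f2 = 1000, sigma_v2 = 1e-5, steps = 10)
#' path <- mvtnorm::rmvnorm(1, sigma = Sigma)
#' }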
GetSigma <- function(tStop, l, sigma_f2, sigma_v2, steps){
grid <- seq(0, tStop, tStop/steps)
Sigma <- matrix(NA, nrow = length(grid), ncol = length(grid))
for (i in 1:length(grid)){
for (j in 1:length(grid)){
Sigma[i,j] <- covariance(grid[i], grid[j], sigma_f2, sigma_v2, l)
}
}
return(Sigma)
}
# A function to calculate pi(l|x,y) = pi(l|x) (l does not depend on y).
#' Log marginal posterior of the length scale
#'
#' @param l Length scale
#' @param x Intensity function
#' @param end.time Experiment end time
#' @param sigma.f2 signal variance
#' @param sigma.n2 noise variance
#'
#' @return Marginal of the length scale
#' @export
#'
#' @examples
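#' # A minimal sketch (not run): profile the log marginal over a grid of
#' # length scales for a synthetic intensity.
#' \dontrun{
#' x <- exp(sin(seq(0, 20, length.out = 21)))
#' ls <- seq(0.5, 10, by = 0.5)
#' vals <- sapply(ls, log_pi_l, x = x, end.time = 20, sigma.f2 = 1000, sigma.n2 = 1e-5)
#' ls[which.max(vals)]
#' }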
log_pi_l <- function(l,x,end.time,sigma.f2,sigma.n2){
# Need to consider the logarithm of the intensity as prior is defined on the logarithm.
x <- log(x)
# First calculate the prior for l: l ~ Exp(rate = 0.01).
log.prior.l <- stats::dexp(l, rate = 0.01, log = TRUE)
# log.prior.l <- dgamma(l, shape = 2.5, rate = 0.5, log = TRUE)
# log.prior.l <- dlnorm(l, meanlog = log(8), sdlog = 0.3, log = TRUE)
# Next need to calculate the likelihood of the intensity given l. pi(x|l).
# First get the covariance matrix Sigma.l.
Sigma.l <- GetSigma(end.time,l,sigma.f2,sigma.n2,(length(x)-1))
# Calculate the density at x of the MVN(0,Sigma.l) distribution.
log.likeli.l <-mvtnorm::dmvnorm(x, mean = rep(0,length(x)), sigma = Sigma.l, log = TRUE)
return(log.prior.l + log.likeli.l)
}
#' MCMC sampler for the spiking intensity under a GP prior
#'
#' @param d.spikes Matrix of spike times, one spike train per column, padded with NA
#' @param end.time Experiment end time
#' @param iter Number of iterations to store
#' @param burn Number of burn-in iterations
#' @param steps Number of grid intervals for the intensity function
#' @param w Step size of the preconditioned Crank-Nicolson update for x
#' @param hyper.param ISI hyper parameter: a single value fixes it, a length-2 vector gives Gamma prior parameters
#' @param prior.x Signal and noise variance of the GP prior on the log intensity
#' @param start.l Iteration after which l updates begin
#' @param ISI.type The ISI distribution, e.g. "Gamma"
#' @param start.hyper Iteration after which hyper parameter updates begin
#' @param hyper.start Initial hyper parameter value when it is sampled
#' @param x.start Optional starting intensity (a vector of length steps + 1)
#' @param sigma.h Random walk standard deviation for the hyper parameter
#' @param sigma.l Random walk standard deviation for l
#' @param T.min.param Refractory period: NA for none, a single value fixes it, a length-2 vector gives prior parameters
#' @param sigma.t Random walk standard deviation for T.min
#' @param l.param Length scale: a single value fixes it, a length-2 vector enables sampling of l
#' @param initial.l Initial l value when it is sampled
#'
#' @return A list with the sampled intensities and, where sampled, the hyper parameter, T.min and l chains
#' @export
#'
#' @examples
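#' # A usage sketch (not run): `d.spikes` holds one spike train per column,
#' # padded with NA; relies on log_pi_x() and related helpers from this package.
#' \dontrun{
#' spikes <- matrix(sort(stats::runif(40, 0, 20)), ncol = 1)
#' fit <- mcmc_gp(d.spikes = spikes, end.time = 20, iter = 2000, burn = 500,
#'                steps = 100, w = 0.2, hyper.param = 1, l.param = 2)
#' plot.95conf(fit$intensity, end.time = 20)
#' }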
mcmc_gp <- function(d.spikes, end.time, iter, burn, steps, w, prior.x = c(1000, 1e-5), hyper.param = c(1, 0.001), ISI.type = "Gamma", sigma.h = NA, start.hyper = 1000, hyper.start = 1, l.param = 1, start.l = 1000, sigma.l = NA, initial.l = 1, T.min.param = NA, sigma.t = NA, x.start = NULL){
# Error messages
# Check hyper.param has length 1 or 2 (either a fixed value, or Gamma(a, b) prior parameters).
if(length(hyper.param)>2){stop("Error! The length of hyper.param != 1 or 2.\n The hyper.param you entered has length ",
length(hyper.param),". \n")}
# Set up the prior parameters.
sigma.f2 <- prior.x[1]
sigma.n2 <- prior.x[2]
out.likeli <- NULL
out.piL <- NULL
# Update to store the number of times that we accept the candidate value.
update <- c(0,0)
# Setup if we need to sample T.min.
if(is.na(T.min.param)){
T.min <- 0 ; max.T.min <- 0.1
}
else if(length(T.min.param)==1 && is.numeric(T.min.param)){
# Fixed T.min value
T.min <- T.min.param
# Define max.T.min so that we don't put forward a value larger than possible.
max.T.min <- 100000
for(i in 1:ncol(d.spikes)){
s <- d.spikes[,i]
s <- s[!is.na(s)]
new.T.max <- min(s[-1] - s[-length(s)])
max.T.min <- min(max.T.min,new.T.max)
}
if(T.min > max.T.min){
stop('Your T.min value is larger than the smallest ISI.')
}
}
else if(length(T.min.param)==2 && is.numeric(T.min.param)){
out.T.min <- rep(NA,iter)
if(is.na(sigma.t)){stop("Error! If you want to use MCMC to estimate T.min you must supply a sigma.t value.")}
# Define max.T.min so that we don't put forward a value larger than possible.
max.T.min <- 100000
for(i in 1:ncol(d.spikes)){
s <- d.spikes[,i]
s <- s[!is.na(s)]
if(s[length(s)]-end.time <1e-10){s <- s[-length(s)]} # Remove last time if it corresponds to length of experiment
new.T.max <- min(s[-1] - s[-length(s)])
max.T.min <- min(max.T.min,new.T.max)
}
T.min <- min(1e-10, max.T.min/2) # Set T.min to a valid value.
}
else{
stop('Invalid Refractory period settings!')
}
# Setup if we need to sample l.
if(length(l.param) == 1 && is.numeric(l.param)){
l.cur <- l.param
}
else if(length(l.param) == 2 && is.numeric(l.param)){
# Check valid sigma.l
if(!(length(sigma.l) == 1 && is.numeric(sigma.l))){
stop('Invalid sigma.l!')
}
if(sigma.l < 0){
stop('sigma l must be positive!')
}
# Check valid start.l
if(!(length(start.l) == 1 && is.numeric(start.l))){
stop('Invalid start.l!')
}
l.cur <- initial.l
out.l <- rep(NA,iter)
}
###############
## Set the initial value of x, centred about the mean spiking intensity (#spikes/time).
## If x.start is supplied it is used as the starting value (it must be a vector of length steps+1).
###############
t_orig <- seq(0,end.time, end.time/steps)
avg.spikes <- sum(!is.na(d.spikes))/(ncol(d.spikes)*end.time)
# x.cur <- rep(avg.spikes,steps+1)
if(is.null(x.start)){
x.cur <- rep(avg.spikes,steps+1) # + (0.5*avg.spikes)*sin((t_orig*5)/end.time) # Optionally begin with a wave so that the initial length scale is appropriate.
}
else{
x.cur <- x.start
}
# Set the initial value of the hyper parameters if required.
# I.e if hyper.param has length 1 set hyp.cur to this value. If not set hyp.cur to hyper.start value.
if(length(hyper.param) ==1){
hyp.cur <- hyper.param
}
else{hyp.cur <- hyper.start}
# Create stores for the outputs, matrix out.x for x, and vectors out.l, out.hyper for l and ISI hyper parameter respectively.
out.x <- matrix(NA, nrow=iter, ncol=steps+1)
out.hyper <- rep(NA,iter)
# When calculating l we thin the intensity to 51 grid points (every steps/50-th point) to reduce computation.
factor <- steps / 50
shortened <- seq(1, (steps+1), factor)
# sigga is the covariance matrix used when updating the start of x.
times <- seq(0,end.time, end.time/steps)
sigga <- matrix(NA, ncol=length(times), nrow = length(times))
for(i in 1:length(times)){
for(j in 1:length(times)){
sigga[i,j] <- covariance(times[i], times[j], sigma_f2 = prior.x[1], sigma_v2 = prior.x[2], l = l.cur) # use l.cur, since l.param may have length 2 when l is sampled
}
}
i.sigga <- solve(sigga)
pb <- utils::txtProgressBar(min = 1, max = iter+burn, style = 3)
# mcmc loop starts here
for (i in 2:(iter+burn)) {
###############
# Update x
###############
# Draw from the 'prior' i.e \nu \sim \mathcal{N}(\mathbf{0},\mathbf{\Sigma})
nu <- stat_gp(cov.fn, l = l.cur, dt.GP = end.time/steps, steps = steps)$GP
# Get the candidate x.
x.can.star <- (1-w**2)**0.5 * log(x.cur) + w * nu
x.can <- exp(x.can.star)
# x.can <- (1-w**2)**0.5 * x.cur + w * nu
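# (Preconditioned Crank-Nicolson: since nu is a draw from the GP prior, this
# proposal preserves the N(0, Sigma) prior on log(x), so the prior terms cancel
# and the acceptance ratio below reduces to the likelihood ratio.)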
# calculate log likelihood.
log.likli.can <- log_pi_x(d.spikes=d.spikes, hyper.param = hyp.cur, x= x.can, end.time = end.time, T.min = T.min, max.T.min = max.T.min, ISI.type = ISI.type, do.log = TRUE)
log.likli.cur <- log_pi_x(d.spikes=d.spikes, hyper.param = hyp.cur, x= x.cur, end.time = end.time, T.min = T.min, max.T.min = max.T.min, ISI.type = ISI.type, do.log = TRUE)
# draw u from U(0,1).
u <- stats::runif(1)
# M-H ratio
if (log(u) < log.likli.can - log.likli.cur) {
update[1] <- update[1]+1
x.cur <- x.can
}
###############
# Update x (start of x only)
###############
# if(i%%1000==0){
# times <- seq(0,end.time, end.time/steps)
# max.width <- max(d.spikes[1,1]/(end.time/steps), 200) # Time of the first spike or if close to beginning set to 200.
# # Begin by proposing from min version x5.
# for(j in 1:5){
# width <- sample(30:(max.width+100), size=1) # Choose the width of the section we are going to update.
# # Time and log x restricted to the region of interest
# region <- times[1:width]
# x.cur.region <- log(x.cur[1:width])
#
# # Draw from a MVN conditioned on the last 10 values being known and equal to x.cur
# x.can.region <- draw_conditionedMVN(region,log(x.cur[1:length(region)]),sigma.var=10, l = l.cur, noise =1e-6, f='min')
#
# # Setup the candidate on the entire time interval
# x.can <- x.cur
# x.can[1:length(region)] <- exp(x.can.region$draw)
#
# likelihood.ratio <- log_pi_x(d.spikes=d.spikes, hyper.param = hyp.cur, x= x.can, end.time = end.time, T.min = T.min, max.T.min = max.T.min, ISI.type = ISI.type, do.log = TRUE) -log_pi_x(d.spikes=d.spikes, hyper.param = hyp.cur, x= x.cur, end.time = end.time, T.min = T.min, max.T.min = max.T.min, ISI.type = ISI.type, do.log = TRUE)
#
# range <- seq(1,2001,1)
# prior.ratio <- -0.5*t(log(x.can[range])) %*% i.sigga %*% log(x.can[range]) +
# 0.5*t(log(x.cur[range])) %*% i.sigga %*% log(x.cur[range])
#
# proposal.ratio <- x.can.region$proposal.ratio
#
# p.acc <- likelihood.ratio + prior.ratio + proposal.ratio
#
# u <- stats::runif(1)
#
# # M-H ratio
# if (log(u) < p.acc) {
# x.cur <- x.can
# # print('change start')
# }
#
# # region at the end
# # region at end.
# max.widthB <- max(steps+1-(max(d.spikes[d.spikes < end.time-1e-10],na.rm=T)/(end.time/steps)), 200)
# width <- sample((30):(max.widthB+100), size=1)
# region <- times[(length(times)-width):length(times)]
# x.cur.region <- log(x.cur[(length(times)-width):length(times)])
#
# x.can.region <- draw_conditionedMVN(region,x.cur.region,sigma.var=10, l = l.cur, noise =1e-6, pos = 'end',f='min')
# x.can <- x.cur
# x.can[(length(times)-width):length(times)] <- exp(x.can.region$draw)
#
#
# likelihood.ratio <- log_pi_x(d.spikes=d.spikes, hyper.param = hyp.cur, x= x.can, end.time = end.time, T.min = T.min, max.T.min = max.T.min, ISI.type = ISI.type, do.log = TRUE) -log_pi_x(d.spikes=d.spikes, hyper.param = hyp.cur, x= x.cur, end.time = end.time, T.min = T.min, max.T.min = max.T.min, ISI.type = ISI.type, do.log = TRUE)
#
# range <- seq(1,2001,1)
# prior.ratio <- -0.5*t(log(x.can[range])) %*% i.sigga %*% log(x.can[range]) +
# 0.5*t(log(x.cur[range])) %*% i.sigga %*% log(x.cur[range])
#
# proposal.ratio <- x.can.region$proposal.ratio
#
# p.acc.end <- likelihood.ratio + prior.ratio + proposal.ratio
#
# # draw u from U(0,1).
# u <- stats::runif(1)
#
# # M-H ratio
# if (log(u) < p.acc.end) {
# x.cur <- x.can
# # print('change end')
# }
# }
# }
###############
# Update l (does not depend on the ISI parameters)
###############
# Check we input l as a parameter.
if(length(l.param) == 2){
# Only start updating l once we have reached the start.l-th iteration.
# Only update every 2nd iteration; change i %% 2 == 0 to alter the update frequency.
if(i > start.l && i%%2 == 0){
# Get the candidate value for l.
l.can <- stats::rnorm(1,l.cur,sigma.l)
# Check the candidate value is in a reasonable range (> 0; the upper bound of 50 is ad hoc and may need changing).
if (l.can >0 && l.can < 50){
# Calculate the likelihood.
# Note this is calculated with a shortened intensity to reduce computation time.
log.pi.l.can <- log_pi_l(l.can,x.cur[shortened],end.time,sigma.f2,sigma.n2)
log.pi.l.cur <- log_pi_l(l.cur,x.cur[shortened],end.time,sigma.f2,sigma.n2)
# draw u from U(0,1)
u <- stats::runif(1)
# M-H ratio
if (log(u) < log.pi.l.can - log.pi.l.cur) {
update[2] <- update[2]+1
l.cur <- l.can
}
}
}
}
###############
# Update hyper parameter.
###############
if(length(hyper.param) == 2 && i > start.hyper){
# Get the candidate value for the hyper.
hyp.can <- stats::rnorm(1, hyp.cur, sigma.h)
# Check the candidate value is positive.
if (hyp.can > 0) {
log.pi.cur <- log_pi_hyper_param(d.spikes = d.spikes, hyper.param = hyp.cur, x=x.cur, end.time = end.time, prior.hyper.param = hyper.param, T.min = T.min, max.T.min = max.T.min, ISI.type = ISI.type)
log.pi.can <- log_pi_hyper_param(d.spikes = d.spikes, hyper.param = hyp.can, x=x.cur, end.time = end.time, prior.hyper.param = hyper.param, T.min = T.min, max.T.min = max.T.min, ISI.type = ISI.type)
# M-H ratio
# draw u from U(0,1)
u <- stats::runif(1)
if (log(u) < log.pi.can - log.pi.cur) {
hyp.cur <- hyp.can
}
}
}
###############
### Update T.min
###############
if(length(T.min.param)==2){
T.min.can <- stats::rnorm(1, T.min, sigma.t)
if (T.min.can > 0 && T.min.can < max.T.min) {
log.pi.T.cur <- log_pi_Tmin(d.spikes = d.spikes, T.min = T.min, max.T.min = max.T.min, hyper = hyp.cur, x = x.cur, end.time = end.time, prior.T.min = T.min.param, ISI.type =ISI.type)
log.pi.T.can <- log_pi_Tmin(d.spikes = d.spikes, T.min = T.min.can, max.T.min = max.T.min, hyper = hyp.cur, x = x.cur, end.time = end.time, prior.T.min = T.min.param, ISI.type =ISI.type)
# M-H ratio
# draw from a U(0,1)
u <- stats::runif(1)
if (log(u) < log.pi.T.can - log.pi.T.cur) {
T.min <- T.min.can
}
}
}
###############
# Store results.
###############
if (i>burn){
j <- i-burn
out.x[j,]<- x.cur
if(length(l.param)==2){out.l[j] <- l.cur}
if(length(hyper.param)==2){ out.hyper[j] <- hyp.cur}
if(length(T.min.param)==2){ out.T.min[j] <- T.min}
out.likeli <- c(out.likeli,log.likli.cur)
if(length(l.param)==2){out.piL <- c(out.piL, log.pi.l.cur )}
}
utils::setTxtProgressBar(pb, i) # update text progress bar after each iter
}
# Calculate the acceptance rates for the x and l moves (diagnostics; not returned).
# print(update)
p.x.acc <- update[1]/(iter+burn)
p.l.acc <- update[2]/(iter+burn)
hyp <- list(hyper = hyper.param) ; l <- list(l = l.param) ; t <- list(T.min = T.min.param)
intensity <- list(intensity = out.x)
if (length(hyper.param)==2){ hyp <- list(hyper = out.hyper)}
if (length(T.min.param)==2){ t <- list(T.min = out.T.min)}
if(length(l.param)==2){ l <- list(l = out.l)}
output <- do.call("c",list(intensity,hyp,t,l))
return(output)
}
#' Approximate l from PWC
#'
#' @param x.pwc the intensity function from a PWC prior.
#' @param end.time.real The end.time of the experiment
#' @param end.time End time used in the statistical analysis.
#' @param sigma.f2 Signal variance
#' @param sigma.n2 Noise variance
#'
#' @return l value.
#' @export
#'
#' @examples
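#' # A sketch (not run): estimate l from a synthetic piecewise-constant
#' # intensity (requires the fANCOVA package).
#' \dontrun{
#' x.pwc <- rep(c(2, 5, 3, 6, 4), each = 21)[1:101]
#' get_l_single(x.pwc, end.time.real = 20)$l
#' }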
get_l_single <- function(x.pwc, end.time.real, end.time = 20, sigma.f2 = 500, sigma.n2 = 1e-5){
data <- x.pwc * end.time.real/end.time
t <- seq(0,end.time,end.time/(length(x.pwc)-1))
A <- cbind(t,data)
df <- data.frame(A)
# los1 <- loess(data~t,df,span =0.1)
# smoothed <- pmax(predict(los1),1e-5)
#
# Automatically pick span
los2 <- fANCOVA::loess.as(t,data,degree = 1)
smoothed <- pmax(stats::predict(los2),1e-5)
# Estimate l using smoothed.
A <- seq(1,length(x.pwc),((length(x.pwc)-1)/20))
# optim minimises so create marginal_l = -log_pi_l to minimise
marginal_l <- function(l,x,end.time,sigma.f2,sigma.n2){return(-log_pi_l(l,x,end.time,sigma.f2,sigma.n2))}
l.cur <- stats::optim(1,fn = marginal_l,x=smoothed[A],end.time = end.time, sigma.f2 = sigma.f2, sigma.n2 = sigma.n2, method = 'Brent', lower=0, upper = 2*end.time)$par
return(list(l = l.cur, smoothed = smoothed))
}
#' Get length scale estimates from a results file
#'
#' @param filepath Path to an .Rdata file containing the objects `spikes`, `raw` and `PWC`
#' @param save.file If TRUE, write the l values to a CSV alongside `filepath`; otherwise return the matrix
#'
#' @return A matrix of l values, one row per spike sequence and one column per ISI distribution
#' @export
#'
#' @examples
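#' # A sketch (not run); the path is a hypothetical example and the file is
#' # assumed to contain the objects `spikes`, `raw` and `PWC`.
#' \dontrun{
#' get_l("results/analysis.Rdata", save.file = TRUE)
#' }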
get_l <- function(filepath, save.file = TRUE){
# Load in the results; the file is expected to provide the objects `spikes`, `raw` and `PWC`.
load(filepath)
# Create output to store l results
output <- matrix(NA,ncol = length(PWC), nrow = ncol(raw))
# Loop over each spike sequence.
for( i in 1:ncol(raw)){
if(is.na(spikes[1,i])){next}
print(i)
# Find the experiment time for the spike sequence
end.time.real <- max(spikes[,i],na.rm = T)
# loop over all the ISI distributions
for(j in 1:length(PWC)){
x.pwc <- PWC[[j]][[1]][i,]
if(is.na(x.pwc[1])){next}
output[i,j] <- get_l_single(x.pwc,end.time.real)$l
}
}
if(save.file){
new.file <- gsub(pattern = ".Rdata",replacement = "_lvalues.csv",x =filepath)
colnames(output) <- names(PWC)
utils::write.csv(output,file= new.file)
}
else{
return(output)
}
}
#' Mean and 95% credible band of the intensity function
#'
#' @param intensities Matrix of posterior intensity draws, one draw per row
#' @param x Optional true intensity to overlay on the plot
#' @param end.time Experiment end time
#' @param height Extra height added to the y-axis limit
#' @param return.val If TRUE, return the band and mean instead of plotting
#' @param add If TRUE, add the band to an existing plot
#'
#' @return If return.val is TRUE, a list with elements lower, upper and mean; otherwise a plot is drawn
#' @export
#'
#'
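#' @examples
#' # A small demonstration on synthetic posterior draws.
#' draws <- matrix(stats::rnorm(100 * 51, mean = 5), nrow = 100)
#' plot.95conf(draws, end.time = 20)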
plot.95conf <- function(intensities, end.time, x = NULL, height = 0, return.val = FALSE, add = FALSE){
mean.intensities <- colSums(intensities)/dim(intensities)[1]
discret <- length(intensities[1,])
upper <- rep(NA, discret)
lower <- rep(NA, discret)
for (i in 1:discret){
quant <- stats::quantile(intensities[,i], probs = c(0.025, 0.975), na.rm = TRUE)
lower[i] <- quant[1]
upper[i] <- quant[2]
}
if (return.val == TRUE){
return(list(lower = lower, upper = upper, mean = mean.intensities))
}
else{
grid <- seq(0, end.time, (end.time/(ncol(intensities)-1)))
if(!add){
base::plot(grid, mean.intensities, ylim = c(0, (max(mean.intensities)+height)), type = "n", ylab = "intensity", xlab = "time")
}
graphics::polygon(c(grid, rev(grid)), c(upper, rev(lower)), col = grDevices::rgb(85, 75, 85, max = 255, alpha = 100), border = NA)
if (!is.null(x)){ graphics::lines(grid, x, lwd = 3)}
graphics::lines(grid, mean.intensities, col = "purple", lwd = 3)
}
}