R/linesearch.R

#' Backtracking line search for step size selection
#'
#' Starting from \code{alpha_bar}, shrinks the step by a factor of \code{rho}
#' until the sufficient decrease (Armijo) condition
#' f(x + alpha*p) <= f(x) + c*alpha*t(grad(x)) %*% p is satisfied.
#'
#' @param x current point
#' @param funval objective function calculator
#' @param fungrad objective function's gradient calculator
#' @param p search direction vector
#' @param c sufficient decrease parameter: 0 < c < 1
#' @param alpha_bar initial step size guess
#' @param rho backtracking shrink factor: 0 < rho < 1
#'
#' @return backtracking step size: the largest tried step that achieves sufficient decrease of the objective function
#'
#' @export
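#'
#' @examples
#' # Minimal usage sketch; f and g below are an assumed test problem
#' # (the quadratic x1^2 + 2*x2^2), not part of the package.
#' f <- function(x) x[1]^2 + 2*x[2]^2
#' g <- function(x) c(2*x[1], 4*x[2])
#' # step along the steepest-descent direction from (1, 1)
#' backtracking(x = c(1, 1), funval = f, fungrad = g, p = -g(c(1, 1)))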
backtracking <- function(x, funval, fungrad, p, c = 0.5, alpha_bar = 1, rho = 0.5) {
  alpha <- alpha_bar

  # these quantities do not depend on alpha, so compute them once
  fx <- funval(x)
  slope <- as.numeric(t(fungrad(x)) %*% p)

  # shrink the step until the sufficient decrease (Armijo) condition holds
  while (funval(x + alpha*p) > fx + c*alpha*slope) {
    alpha <- rho*alpha
  }

  return(alpha)
}

#' Implements gradient descent
#'
#' @param x_init initial guess
#' @param funval objective function calculator
#' @param fungrad objective function's gradient calculator
#' @param alpha step size; if "backtracking", the step is selected at each iteration by the backtracking algorithm
#' @param maxit maximum number of iterations
#' @param threshold convergence threshold
#'
#' @import dplyr
#' @return solution list -- solution: x_min, path: x_points, number of iterations: n_it
#' @export
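#'
#' @examples
#' # Minimal usage sketch; f and g below are an assumed test problem
#' # (the quadratic x1^2 + 2*x2^2), not part of the package.
#' f <- function(x) x[1]^2 + 2*x[2]^2
#' g <- function(x) c(2*x[1], 4*x[2])
#' sol <- gradient_descent(x_init = c(1, 1), funval = f, fungrad = g, alpha = 0.1)
#' sol$x_min  # approaches the minimizer c(0, 0)
#' sol$n_it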
gradient_descent <- function(x_init, funval, fungrad, alpha, maxit = 1000, threshold = 1e-9) {
  x <- x_init
  points <- tibble(x1 = x[1], x2 = x[2])

  # decide once whether the step size is fixed or chosen by line search
  use_backtracking <- identical(alpha, "backtracking")

  for (it in 1:maxit) {
    # set search direction (steepest descent)
    p <- -fungrad(x)

    # select the step size; backtracking must be rerun at every iteration,
    # since the appropriate step changes as x moves
    if (use_backtracking) {
      step <- backtracking(x = x,
                           funval = funval,
                           fungrad = fungrad,
                           p = p)
    } else {
      step <- alpha
    }

    # move along search direction
    x <- x + step*p

    # update path dataframe
    points <- points %>% rbind(as.vector(x))

    # check convergence: stop once consecutive iterates nearly coincide
    residual_vector <- points[it+1,] - points[it,]
    residual <- norm(as.matrix(residual_vector), type = "2")
    if (residual < threshold) break
  }
  sol <- list(x_min = x, x_points = points, n_it = it)
  return(sol)
}

#' Implements Newton's method
#'
#' @param x_init initial guess
#' @param funval objective function calculator
#' @param fungrad objective function's gradient calculator
#' @param funhess objective function's Hessian calculator
#' @param alpha step size (1 gives the full Newton step; values in (0, 1) damp it)
#' @param maxit maximum number of iterations
#' @param threshold convergence threshold
#'
#' @import dplyr
#' @return solution list -- solution: x_min, path: x_points, number of iterations: n_it
#' @export
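#'
#' @examples
#' # Minimal usage sketch; f, g and h below are an assumed test problem
#' # (the quadratic x1^2 + 2*x2^2), not part of the package.
#' f <- function(x) x[1]^2 + 2*x[2]^2
#' g <- function(x) c(2*x[1], 4*x[2])
#' h <- function(x) diag(c(2, 4))
#' # the Hessian is constant, so one full Newton step hits the minimizer
#' sol <- newtons_method(x_init = c(1, 1), funval = f, fungrad = g, funhess = h)
#' sol$x_min  # essentially c(0, 0)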
newtons_method <- function(x_init, funval, fungrad, funhess, alpha = 1, maxit = 1000, threshold = 1e-9) {
  x <- x_init
  points <- tibble(x1 = x[1], x2 = x[2])

  for (it in 1:maxit) {
    # set Newton's direction by solving funhess(x) %*% p = -fungrad(x);
    # solve(A, b) avoids forming the explicit inverse, which is slower
    # and less numerically stable than solving the linear system
    p <- -solve(funhess(x), fungrad(x))

    # move along search direction
    x <- x + alpha*p

    # update path dataframe
    points <- points %>% rbind(as.vector(x))

    # check convergence: stop once consecutive iterates nearly coincide
    residual_vector <- points[it+1,] - points[it,]
    residual <- norm(as.matrix(residual_vector), type = "2")
    if (residual < threshold) break
  }
  sol <- list(x_min = x, x_points = points, n_it = it)
  return(sol)
}