R/s_grad_desc.R

#' @title Stochastic Gradient Descent
#'
#' @description Apply a single mini-batch gradient descent update to the
#'   network \code{res}, averaging the back-propagated gradients over the
#'   rows of \code{xs}.
#'
#' @param res A network object containing \code{weights} and \code{bias}
#'   lists of matrices, one entry per layer.
#' @param xs A matrix of training inputs, one observation per row.
#' @param ys A list of target outputs, one element per row of \code{xs}.
#' @param eta The learning rate (defaults to \code{0.05}).
#'
#' @return \code{res} with its weights and biases updated.
#'
#' @importFrom magrittr %>%
#'
#' @export


s_grad_desc <- function(res, xs, ys, eta = 0.05) {

  # Set up zero matrices to accumulate the gradients
  nabW <- res$weights %>% cerebrum::zero_mat()
  nabB <- res$bias %>% cerebrum::zero_mat()
  nabLen <- nabW %>% seq_along()

  # Loop over the training data, back-propagating each observation
  # and accumulating its gradients
  for (i in seq_len(xs %>% nrow)) {
    backwards <- res %>%
      cerebrum::backprop(
        x = xs[i, ],
        y = ys[[i]]
      )

    # Accumulate the weight gradients from this observation
    nabW <- lapply(
      X = nabLen,
      FUN = function(x) nabW[[x]] %>% `+`(backwards$nablaW[[x]])
    )

    # Accumulate the bias gradients from this observation
    nabB <- lapply(
      X = nabLen,
      FUN = function(x) nabB[[x]] %>% `+`(backwards$nablaB[[x]])
    )
  }

  # Scaling factor: learning rate divided by the number of training rows
  fact <- eta %>% `/`(xs %>% nrow)

  # Finally, update the weights + biases...
  res$weights <- lapply(
    X = nabLen,
    FUN = function(x) res$weights[[x]] %>% `-`(fact * nabW[[x]])
  )

  res$bias <- lapply(
    X = nabLen,
    FUN = function(x) res$bias[[x]] %>% `-`(fact * nabB[[x]])
  )

  # Return res with its updated weights + biases
  return(res)
}
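
Below is a minimal usage sketch, not taken from the package. The structure of res (a list with weights and bias entries holding one matrix per layer), the matrix orientations, and the toy data are assumptions inferred from how s_grad_desc indexes its arguments; in practice res would come from the package's own network setup, and cerebrum::backprop must accept the same layout.

# Usage sketch (illustrative only): the shapes of `res`, `xs` and `ys`
# below are assumptions inferred from the function body, not from the
# cerebrum API itself.
library(magrittr)

set.seed(1)

# A toy 2-4-1 network: one weight matrix and one bias column per layer
res <- list(
  weights = list(
    matrix(rnorm(4 * 2), nrow = 4, ncol = 2),
    matrix(rnorm(1 * 4), nrow = 1, ncol = 4)
  ),
  bias = list(
    matrix(rnorm(4), nrow = 4, ncol = 1),
    matrix(rnorm(1), nrow = 1, ncol = 1)
  )
)

# A toy mini-batch: four observations of two features, with list targets
xs <- matrix(runif(4 * 2), nrow = 4, ncol = 2)
ys <- list(1, 0, 1, 0)

# One mini-batch update; call repeatedly over shuffled batches to train
res <- res %>% cerebrum::s_grad_desc(xs = xs, ys = ys, eta = 0.1)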