knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>",
  fig.width=4, 
  fig.height=2,
  fig.align = "center"
)
library(cpp)
library(dplyr)

# We want to compare the estimating capabilities of the Steepest Descent and
# Gradient Descent functions for fitting a linear regression model on real data,
# modelling fuel consumption through gross horsepower (hp) and weight (wt).

#Gradient Descent
lm_opt_GD = lm_GD_optimizer(Dep.var ~ . -1, mydataset_noconst, tolerance=1e-3, 
                            maxit=1000, stepsize=1e-5, verbose=T)
lm_opt_GD$beta_hat

#Steepest Descent
lm_opt_SD = lm_SD_optimizer(Dep.var ~ . -1, mydataset_noconst, tolerance=1e-3, 
                            maxit=1000, verbose=T)
lm_opt_SD$beta_hat
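
# A quick sanity check (a minimal sketch, assuming mydataset_noconst is still in
# the workspace and that beta_hat is a plain numeric coefficient vector): the
# closed-form OLS fit from lm() should be close to both iterative estimates.
lm_fit = lm(Dep.var ~ . -1, data = mydataset_noconst)
cbind(OLS = coef(lm_fit),
      GD  = lm_opt_GD$beta_hat,
      SD  = lm_opt_SD$beta_hat)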

#COMPARISON
# The two methods lead to consistent estimates of the beta vector, since their
# values are quite close to the true ones used to simulate the data.
# Nevertheless, the Steepest Descent method converges in fewer iterations than
# Gradient Descent. We could expect this from their formulation: at each
# iteration Steepest Descent chooses its step size via an exact line search
# along the negative gradient (note that no stepsize argument is required),
# whereas Gradient Descent moves along the negative gradient with a fixed,
# user-supplied step size.
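
# To make the comparison concrete, here is a minimal sketch of one update of each
# method for the least-squares objective f(beta) = 0.5 * ||y - X %*% beta||^2.
# This is an assumption about what lm_GD_optimizer and lm_SD_optimizer do
# internally; gd_step and sd_step are hypothetical helpers, not package functions.
gd_step = function(beta, X, y, stepsize) {
  g = -t(X) %*% (y - X %*% beta)       # gradient of the least-squares objective
  beta - stepsize * g                  # fixed step along the negative gradient
}
sd_step = function(beta, X, y) {
  g = -t(X) %*% (y - X %*% beta)
  alpha = sum(g^2) / sum((X %*% g)^2)  # exact line search: argmin_a f(beta - a*g)
  beta - alpha * g                     # optimal step along the negative gradient
}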

#Gradient Descent
lm_opt_GD_1 = lm_GD_optimizer(Dep.var ~ . -1, mydataset_noconst, tolerance=1e-3, 
                            maxit=1000, stepsize=1e-1, verbose=T)
lm_opt_GD_1
# It doesn't converge: with stepsize = 1e-1 the iterates diverge within the
# iteration budget. The successful convergence of the Gradient Descent method is
# therefore quite sensitive to the choice of the stepsize (learning rate)
# parameter, as the comparison above shows.
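
# A rough explanation (a sketch, under the assumption that lm_GD_optimizer runs a
# fixed-step gradient update on the unscaled least-squares objective): such an
# iteration converges only if stepsize < 2 / lambda_max(X'X). For unscaled
# regressors this bound is typically far below 1e-1, which is why the run diverges.
X = as.matrix(mydataset_noconst[, setdiff(names(mydataset_noconst), "Dep.var")])
2 / max(eigen(crossprod(X), only.values = TRUE)$values)  # largest convergent stepsize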

