maxNR <- function(fn, grad=NULL, hess=NULL, start,
constraints=NULL,
finalHessian=TRUE,
bhhhHessian=FALSE,
fixed=NULL,
activePar=NULL,
control=NULL,
...) {
## Newton-Raphson maximisation
## Parameters:
   ## fn - the function to be maximised. Returns either scalar or
## vector value with possible attributes
## constPar and newVal
## grad - gradient function (numeric used if missing). Must return either
## * vector, length=nParam
## * matrix, dim=c(nObs, 1). Treated as vector
## * matrix, dim=c(M, nParam), where M is arbitrary. In this case the
## rows are simply summed (useful for maxBHHH).
## hess - hessian function (numeric used if missing)
## start - initial parameter vector (eventually w/names)
## ... - extra arguments for fn()
   ## finalHessian - whether to include the final Hessian. As computing the
   ##           final Hessian carries no extra cost for the NR method, this
   ##           option exists mostly for compatibility with the other
   ##           maxXXX functions.
   ##           TRUE/anything else - include
   ##           FALSE - do not include
   ## activePar - an index vector: which parameters are treated as
   ##           variable (free). The other parameters are treated as
   ##           fixed constants
   ## fixed - index vector: which parameters to keep fixed
##
## RESULTS:
## a list of class "maxim":
## maximum function value at maximum
## estimate the parameter value at maximum
## gradient gradient
## hessian Hessian
## code integer code of success:
## 1 - gradient close to zero
## 2 - successive values within tolerance limit
## 3 - could not find a higher point (step error)
## 4 - iteration limit exceeded
## 100 - initial value out of range
## message character message describing the code
   ## last.step only present if code == 3 (step error). A list with the following components:
## theta0 - parameter value which led to the error
## f0 - function value at these parameter values
## climb - the difference between theta0 and the new approximated parameter value (theta1)
## activePar - logical vector, which parameters are active (not constant)
   ## activePar logical vector: which parameters were treated as free (the rest as fixed)
## iterations number of iterations
## type "Newton-Raphson maximisation"
##
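   ##
   ## A minimal usage sketch (the objective function and start values below
   ## are made-up illustrations, not part of this file):
   ##   f <- function(theta) -(theta[1] - 2)^2 - (theta[2] + 1)^2
   ##   res <- maxNR(f, start=c(0, 0))
   ##   res$estimate             # should be close to c(2, -1)
   ##   res$code                 # 1 or 2 on successful convergence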
## ------------------------------
## Add parameters from ... to control
   if(!inherits(control, "MaxControl")) {
      mControl <- addControlList(maxControl(), control)
   } else {
      mControl <- control
   }
mControl <- addControlList(mControl, list(...), check=FALSE)
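   ## Control parameters may hence be supplied either through 'control' or
   ## directly via '...'; e.g. these two calls should be equivalent
   ## (a sketch, assuming 'tol' is an open slot of maxControl() and
   ## 'f' is as in the usage sketch above):
   ##   maxNR(f, start=c(0, 0), control=list(tol=1e-8))
   ##   maxNR(f, start=c(0, 0), tol=1e-8)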
##
argNames <- c(c("fn", "grad", "hess", "start",
"activePar", "fixed", "control"),
openParam(mControl))
   # Here we allow all control parameters to be submitted outside of
   # the 'control' list. May eventually restrict this to a
   # subset of them
##
checkFuncArgs( fn, argNames, "fn", "maxNR" )
if( !is.null( grad ) ) {
checkFuncArgs( grad, argNames, "grad", "maxNR" )
}
if( !is.null( hess ) ) {
checkFuncArgs( hess, argNames, "hess", "maxNR" )
}
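   ## The checks above reject objective functions whose formal arguments
   ## clash with maxNR's own arguments or with the open control parameters;
   ## e.g. (an illustrative sketch) a log-likelihood written as
   ##   f <- function(theta, tol) ...
   ## would be refused, because 'tol' is also a control parameter.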
   ## establish the active parameters. Internally, we just use the 'fixed' vector
fixed <- prepareFixed( start = start, activePar = activePar,
fixed = fixed )
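   ## E.g. (an illustrative sketch) holding the second parameter at its
   ## start value while estimating the first:
   ##   maxNR(f, start=c(a=0, b=-1), fixed="b")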
## chop off the control args from ... and forward the new ...
dddot <- list(...)
dddot <- dddot[!(names(dddot) %in% openParam(mControl))]
cl <- list(start=start,
finalHessian=finalHessian,
bhhhHessian=bhhhHessian,
fixed=fixed,
control=mControl)
if(length(dddot) > 0) {
cl <- c(cl, dddot)
}
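   ## The remaining '...' arguments are forwarded to fn/grad/hess; e.g.
   ## (an illustrative sketch with a made-up data argument 'x'):
   ##   fx <- function(theta, x) -sum((x - theta)^2)
   ##   maxNR(fx, start=0, x=rnorm(10))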
##
if(is.null(constraints)) {
## call maxNRCompute with the modified ... list
cl <- c(quote(maxNRCompute),
fn=logLikAttr,
fnOrig = fn, gradOrig = grad, hessOrig = hess,
cl)
result <- eval(as.call(cl))
} else {
if(identical(names(constraints), c("ineqA", "ineqB"))) {
stop("Inequality constraints not implemented for maxNR")
} else if(identical(names(constraints), c("eqA", "eqB"))) {
# equality constraints: A %*% beta + B = 0
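            ## E.g. (an illustrative sketch, with 'f' as in the usage sketch
            ## above) maximising subject to theta[1] + theta[2] = 1:
            ##   A <- matrix(c(1, 1), nrow=1)
            ##   maxNR(f, start=c(0.5, 0.5),
            ##         constraints=list(eqA=A, eqB=-1))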
cl <- c(quote(sumt),
fn=fn, grad=grad, hess=hess,
maxRoutine=maxNR,
constraints=list(constraints),
cl)
result <- eval(as.call(cl))
} else {
stop("maxNR only supports the following constraints:\n",
"constraints=list(ineqA, ineqB)\n",
"\tfor A %*% beta + B >= 0 linear inequality constraints\n",
"current constraints:",
paste(names(constraints), collapse=" "))
}
}
## Save the objective function
result$objectiveFn <- fn
##
return( result )
}