modelLookup: Tools for Models Available in 'train'

Description Usage Arguments Details Value Note Author(s) See Also Examples

Description

These function show information about models and packages that are accessible via train

Usage

1
2
3
4
5

Arguments

model

a character string associated with the method argument of train. If no value is passed, all models are returned. For getModelInfo, regular expressions can be used.

regex

a logical: should a regular expressions be used? If FALSE, a simple match is conducted against the whole name of the model.

pkg

a character string of package names.

...

options to pass to grepl

Details

modelLookup is good for getting information related to the tuning parameters for a model. getModelInfo will return all the functions and metadata associated with a model. Both of these functions will only search within the models bundled in this package.

checkInstall will check to see if packages are installed. If they are not and the session is interactive, an option is given to install the packages using install.packages using that functions default arguments (the missing packages are listed if you would like to install them with other options). If the session is not interactive, an error is thrown.

Value

modelLookup produces a data frame with columns

model

a character string for the model code

parameter

the tuning parameter name

label

a tuning parameter label (used in plots)

forReg

a logical; can the model be used for regression?

forClass

a logical; can the model be used for classification?

probModel

a logical; does the model produce class probabilities?

getModelInfo returns a list containing one or more lists of the standard model information.

checkInstall returns not value.

Note

The column seq is no longer included in the output of modelLookup.

Author(s)

Max Kuhn

See Also

train, install.packages, grepl

Examples

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
modelLookup()
modelLookup("gbm")

getModelInfo("pls")
getModelInfo("^pls")
getModelInfo("pls", regex = FALSE)

## Not run: 
checkInstall(getModelInfo("pls")$library)

## End(Not run)

Example output

Loading required package: lattice
Loading required package: ggplot2
                  model           parameter
12                ANFIS          num.labels
13                ANFIS            max.iter
4                AdaBag              mfinal
5                AdaBag            maxdepth
6           AdaBoost.M1              mfinal
7           AdaBoost.M1            maxdepth
8           AdaBoost.M1           coeflearn
42                BstLm               mstop
43                BstLm                  nu
49                 C5.0              trials
50                 C5.0               model
51                 C5.0              winnow
52             C5.0Cost              trials
53             C5.0Cost               model
54             C5.0Cost              winnow
55             C5.0Cost                cost
56            C5.0Rules           parameter
57             C5.0Tree           parameter
62               CSimca           parameter
75               DENFIS                Dthr
76               DENFIS            max.iter
102             FH.GBML        max.num.rule
103             FH.GBML           popu.size
104             FH.GBML             max.gen
105              FIR.DM          num.labels
106              FIR.DM            max.iter
109           FRBCS.CHI          num.labels
110           FRBCS.CHI             type.mf
111             FRBCS.W          num.labels
112             FRBCS.W             type.mf
113              FS.HGD          num.labels
114              FS.HGD            max.iter
136        GFS.FR.MOGUL             max.gen
137        GFS.FR.MOGUL            max.iter
138        GFS.FR.MOGUL            max.tune
139           GFS.LT.RS           popu.size
140           GFS.LT.RS          num.labels
141           GFS.LT.RS             max.gen
142          GFS.THRIFT           popu.size
143          GFS.THRIFT          num.labels
144          GFS.THRIFT             max.gen
163               HYFIS          num.labels
164               HYFIS            max.iter
166                 J48                   C
167                 J48                   M
168                JRip              NumOpt
169                JRip            NumFolds
170                JRip          MinWeights
191                 LMT                iter
188               Linda           parameter
195          LogitBoost               nIter
206                  M5              pruned
207                  M5            smoothed
208                  M5               rules
209             M5Rules              pruned
210             M5Rules            smoothed
214                Mlda           parameter
312              ORFlog                mtry
313              ORFpls                mtry
314            ORFridge                mtry
315              ORFsvm                mtry
305                OneR           parameter
319                PART           threshold
320                PART              pruned
339                PRIM          peel.alpha
340                PRIM         paste.alpha
341                PRIM            mass.min
330        PenalizedLDA              lambda
331        PenalizedLDA                   K
345              QdaCov           parameter
367               RFlda                   q
393                 RRF                mtry
394                 RRF             coefReg
395                 RRF             coefImp
396           RRFglobal                mtry
397           RRFglobal             coefReg
401              RSimca           parameter
356             Rborist           predFixed
357             Rborist             minNode
406                 SBC                 r.a
407                 SBC            eps.high
408                 SBC             eps.low
414               SLAVE          num.labels
415               SLAVE            max.iter
416               SLAVE             max.gen
476                  WM          num.labels
477                  WM             type.mf
1                   ada                iter
2                   ada            maxdepth
3                   ada                  nu
9              adaboost               nIter
10             adaboost              method
11                amdai               model
14               avNNet                size
15               avNNet               decay
16               avNNet                 bag
17                 awnb              smooth
18                awtan               score
19                awtan              smooth
20                  bag                vars
21             bagEarth              nprune
22             bagEarth              degree
23          bagEarthGCV              degree
24               bagFDA              degree
25               bagFDA              nprune
26            bagFDAGCV              degree
27                  bam              select
28                  bam              method
29          bartMachine           num_trees
30          bartMachine                   k
31          bartMachine               alpha
32          bartMachine                beta
33          bartMachine                  nu
34             bayesglm           parameter
35                binda        lambda.freqs
36           blackboost               mstop
37           blackboost            maxdepth
38               blasso            sparsity
39       blassoAveraged           parameter
40               bridge           parameter
41                 brnn             neurons
44                bstSm               mstop
45                bstSm                  nu
46              bstTree               mstop
47              bstTree            maxdepth
48              bstTree                  nu
58              cforest                mtry
59                chaid              alpha2
60                chaid              alpha3
61                chaid              alpha4
63                ctree        mincriterion
64               ctree2            maxdepth
65               ctree2        mincriterion
66               cubist          committees
67               cubist           neighbors
68                  dda               model
69                  dda           shrinkage
70            deepboost            num_iter
71            deepboost          tree_depth
72            deepboost                beta
73            deepboost              lambda
74            deepboost           loss_type
77                  dnn              layer1
78                  dnn              layer2
79                  dnn              layer3
80                  dnn      hidden_dropout
81                  dnn     visible_dropout
82            dwdLinear              lambda
83            dwdLinear                qval
84              dwdPoly              lambda
85              dwdPoly                qval
86              dwdPoly              degree
87              dwdPoly               scale
88            dwdRadial              lambda
89            dwdRadial                qval
90            dwdRadial               sigma
91                earth              nprune
92                earth              degree
93                  elm                nhid
94                  elm              actfun
95                 enet            fraction
96                 enet              lambda
97               evtree               alpha
98           extraTrees                mtry
99           extraTrees       numRandomCuts
100                 fda              degree
101                 fda              nprune
107                foba                   k
108                foba              lambda
115                 gam              select
116                 gam              method
119            gamLoess                span
120            gamLoess              degree
121           gamSpline                  df
117            gamboost               mstop
118            gamboost               prune
122       gaussprLinear           parameter
123         gaussprPoly              degree
124         gaussprPoly               scale
125       gaussprRadial               sigma
131                 gbm             n.trees
132                 gbm   interaction.depth
133                 gbm           shrinkage
134                 gbm      n.minobsinnode
126             gbm_h2o              ntrees
127             gbm_h2o           max_depth
128             gbm_h2o            min_rows
129             gbm_h2o          learn_rate
130             gbm_h2o     col_sample_rate
135            gcvEarth              degree
146                 glm           parameter
145              glm.nb                link
153          glmStepAIC           parameter
147            glmboost               mstop
148            glmboost               prune
151              glmnet               alpha
152              glmnet              lambda
149          glmnet_h2o               alpha
150          glmnet_h2o              lambda
154                gpls              K.prov
155                 hda               gamma
156                 hda              lambda
157                 hda              newdim
158                hdda           threshold
159                hdda               model
160               hdrda               gamma
161               hdrda              lambda
162               hdrda      shrinkage_type
165                 icr              n.comp
171           kernelpls               ncomp
172                kknn                kmax
173                kknn            distance
174                kknn              kernel
175                 knn                   k
176            krlsPoly              lambda
177            krlsPoly              degree
178          krlsRadial              lambda
179          krlsRadial               sigma
180                lars            fraction
181               lars2                step
182               lasso            fraction
183                 lda           parameter
184                lda2               dimen
185        leapBackward               nvmax
186         leapForward               nvmax
187             leapSeq               nvmax
189                  lm           intercept
190           lmStepAIC           parameter
192              loclda                   k
193            logicBag             nleaves
194            logicBag              ntrees
196              logreg            treesize
197              logreg              ntrees
198         lssvmLinear                 tau
199           lssvmPoly              degree
200           lssvmPoly               scale
201           lssvmPoly                 tau
202         lssvmRadial               sigma
203         lssvmRadial                 tau
204                 lvq                size
205                 lvq                   k
211                manb              smooth
212                manb               prior
213                 mda          subclasses
215                 mlp                size
216       mlpKerasDecay                size
217       mlpKerasDecay              lambda
218       mlpKerasDecay          batch_size
219       mlpKerasDecay                  lr
220       mlpKerasDecay                 rho
221       mlpKerasDecay               decay
222       mlpKerasDecay          activation
223   mlpKerasDecayCost                size
224   mlpKerasDecayCost              lambda
225   mlpKerasDecayCost          batch_size
226   mlpKerasDecayCost                  lr
227   mlpKerasDecayCost                 rho
228   mlpKerasDecayCost               decay
229   mlpKerasDecayCost                cost
230   mlpKerasDecayCost          activation
231     mlpKerasDropout                size
232     mlpKerasDropout             dropout
233     mlpKerasDropout          batch_size
234     mlpKerasDropout                  lr
235     mlpKerasDropout                 rho
236     mlpKerasDropout               decay
237     mlpKerasDropout          activation
238 mlpKerasDropoutCost                size
239 mlpKerasDropoutCost             dropout
240 mlpKerasDropoutCost          batch_size
241 mlpKerasDropoutCost                  lr
242 mlpKerasDropoutCost                 rho
243 mlpKerasDropoutCost               decay
244 mlpKerasDropoutCost                cost
245 mlpKerasDropoutCost          activation
246               mlpML              layer1
247               mlpML              layer2
248               mlpML              layer3
249              mlpSGD                size
250              mlpSGD               l2reg
251              mlpSGD              lambda
252              mlpSGD          learn_rate
253              mlpSGD            momentum
254              mlpSGD               gamma
255              mlpSGD         minibatchsz
256              mlpSGD             repeats
257      mlpWeightDecay                size
258      mlpWeightDecay               decay
259    mlpWeightDecayML              layer1
260    mlpWeightDecayML              layer2
261    mlpWeightDecayML              layer3
262    mlpWeightDecayML               decay
263              monmlp             hidden1
264              monmlp          n.ensemble
265             msaenet              alphas
266             msaenet              nsteps
267             msaenet               scale
268            multinom               decay
269               mxnet              layer1
270               mxnet              layer2
271               mxnet              layer3
272               mxnet       learning.rate
273               mxnet            momentum
274               mxnet             dropout
275               mxnet          activation
276           mxnetAdam              layer1
277           mxnetAdam              layer2
278           mxnetAdam              layer3
279           mxnetAdam             dropout
280           mxnetAdam               beta1
281           mxnetAdam               beta2
282           mxnetAdam        learningrate
283           mxnetAdam          activation
284         naive_bayes             laplace
285         naive_bayes           usekernel
286         naive_bayes              adjust
287                  nb                  fL
288                  nb           usekernel
289                  nb              adjust
290          nbDiscrete              smooth
291            nbSearch                   k
292            nbSearch             epsilon
293            nbSearch              smooth
294            nbSearch        final_smooth
295            nbSearch           direction
296           neuralnet              layer1
297           neuralnet              layer2
298           neuralnet              layer3
299                nnet                size
300                nnet               decay
301                nnls           parameter
302         nodeHarvest            maxinter
303         nodeHarvest                mode
304                null           parameter
306          ordinalNet               alpha
307          ordinalNet            criteria
308          ordinalNet                link
309           ordinalRF               nsets
310           ordinalRF         ntreeperdiv
311           ordinalRF          ntreefinal
316                ownn                   K
317                 pam           threshold
318               parRF                mtry
321             partDSA      cut.off.growth
322             partDSA                 MPD
323             pcaNNet                size
324             pcaNNet               decay
325                 pcr               ncomp
326                 pda              lambda
327                pda2                  df
328           penalized             lambda1
329           penalized             lambda2
332                 plr              lambda
333                 plr                  cp
334                 pls               ncomp
335             plsRglm                  nt
336             plsRglm   alpha.pvals.expli
337                polr              method
338                 ppr              nterms
342          protoclass                 eps
343          protoclass           Minkowski
344                 qda           parameter
346                 qrf                mtry
347                qrnn            n.hidden
348                qrnn             penalty
349                qrnn                 bag
366              rFerns               depth
350           randomGLM maxInteractionOrder
351              ranger                mtry
352              ranger           splitrule
353              ranger       min.node.size
354                 rbf                size
355              rbfDDA   negativeThreshold
358                 rda               gamma
359                 rda              lambda
360         regLogistic                cost
361         regLogistic                loss
362         regLogistic             epsilon
363              relaxo              lambda
364              relaxo                 phi
365                  rf                mtry
368             rfRules                mtry
369             rfRules            maxdepth
370               ridge              lambda
371                rlda           estimator
372                 rlm           intercept
373                 rlm                 psi
374                rmda                   K
375                rmda               model
376                rocc              xgenes
377      rotationForest                   K
378      rotationForest                   L
379    rotationForestCp                   K
380    rotationForestCp                   L
381    rotationForestCp                  cp
382               rpart                  cp
383            rpart1SE           parameter
384              rpart2            maxdepth
385           rpartCost                  cp
386           rpartCost                Cost
387          rpartScore                  cp
388          rpartScore               split
389          rpartScore               prune
390             rqlasso              lambda
391                rqnc              lambda
392                rqnc             penalty
398               rrlda              lambda
399               rrlda                  hp
400               rrlda             penalty
402           rvmLinear           parameter
403             rvmPoly               scale
404             rvmPoly              degree
405           rvmRadial               sigma
409                 sda            diagonal
410                 sda              lambda
411                sdwd              lambda
412                sdwd             lambda2
413              simpls               ncomp
417                slda           parameter
418                smda             NumVars
419                smda              lambda
420                smda                   R
421                 snn              lambda
422           sparseLDA             NumVars
423           sparseLDA              lambda
424           spikeslab                vars
425                spls                   K
426                spls                 eta
427                spls               kappa
428             stepLDA              maxvar
429             stepLDA           direction
430             stepQDA              maxvar
431             stepQDA           direction
432             superpc           threshold
433             superpc        n.components
434 svmBoundrangeString              length
435 svmBoundrangeString                   C
436       svmExpoString              lambda
437       svmExpoString                   C
438           svmLinear                   C
439          svmLinear2                cost
440          svmLinear3                cost
441          svmLinear3                Loss
442    svmLinearWeights                cost
443    svmLinearWeights              weight
444   svmLinearWeights2                cost
445   svmLinearWeights2                Loss
446   svmLinearWeights2              weight
447             svmPoly              degree
448             svmPoly               scale
449             svmPoly                   C
450           svmRadial               sigma
451           svmRadial                   C
452       svmRadialCost                   C
453      svmRadialSigma               sigma
454      svmRadialSigma                   C
455    svmRadialWeights               sigma
456    svmRadialWeights                   C
457    svmRadialWeights              Weight
458   svmSpectrumString              length
459   svmSpectrumString                   C
460                 tan               score
461                 tan              smooth
462           tanSearch                   k
463           tanSearch             epsilon
464           tanSearch              smooth
465           tanSearch        final_smooth
466           tanSearch                  sp
467             treebag           parameter
468          vbmpRadial       estimateTheta
469          vglmAdjCat            parallel
470          vglmAdjCat                link
471       vglmContRatio            parallel
472       vglmContRatio                link
473      vglmCumulative            parallel
474      vglmCumulative                link
475       widekernelpls               ncomp
478                wsrf                mtry
479             xgbDART             nrounds
480             xgbDART           max_depth
481             xgbDART                 eta
482             xgbDART               gamma
483             xgbDART           subsample
484             xgbDART    colsample_bytree
485             xgbDART           rate_drop
486             xgbDART           skip_drop
487             xgbDART    min_child_weight
488           xgbLinear             nrounds
489           xgbLinear              lambda
490           xgbLinear               alpha
491           xgbLinear                 eta
492             xgbTree             nrounds
493             xgbTree           max_depth
494             xgbTree                 eta
495             xgbTree               gamma
496             xgbTree    colsample_bytree
497             xgbTree    min_child_weight
498             xgbTree           subsample
499                 xyf                xdim
500                 xyf                ydim
501                 xyf        user.weights
502                 xyf                topo
                                                                                      label
12                                                                             #Fuzzy Terms
13                                                                          Max. Iterations
4                                                                                    #Trees
5                                                                            Max Tree Depth
6                                                                                    #Trees
7                                                                            Max Tree Depth
8                                                                          Coefficient Type
42                                                                    # Boosting Iterations
43                                                                                Shrinkage
49                                                                    # Boosting Iterations
50                                                                               Model Type
51                                                                                   Winnow
52                                                                    # Boosting Iterations
53                                                                               Model Type
54                                                                                   Winnow
55                                                                                     Cost
56                                                                                     none
57                                                                                     none
62                                                                                parameter
75                                                                                Threshold
76                                                                          Max. Iterations
102                                                                             Max. #Rules
103                                                                         Population Size
104                                                                        Max. Generations
105                                                                            #Fuzzy Terms
106                                                                         Max. Iterations
109                                                                            #Fuzzy Terms
110                                                                     Membership Function
111                                                                            #Fuzzy Terms
112                                                                     Membership Function
113                                                                            #Fuzzy Terms
114                                                                         Max. Iterations
136                                                                        Max. Generations
137                                                                         Max. Iterations
138                                                                  Max. Tuning Iterations
139                                                                         Population Size
140                                                                          # Fuzzy Labels
141                                                                        Max. Generations
142                                                                         Population Size
143                                                                          # Fuzzy Labels
144                                                                        Max. Generations
163                                                                            #Fuzzy Terms
164                                                                         Max. Iterations
166                                                                    Confidence Threshold
167                                                              Minimum Instances Per Leaf
168                                                                         # Optimizations
169                                                                                 # Folds
170                                                                             Min Weights
191                                                                             # Iteratons
188                                                                                    none
195                                                                   # Boosting Iterations
206                                                                                  Pruned
207                                                                                Smoothed
208                                                                                   Rules
209                                                                                  Pruned
210                                                                                Smoothed
214                                                                               parameter
312                                                           #Randomly Selected Predictors
313                                                           #Randomly Selected Predictors
314                                                           #Randomly Selected Predictors
315                                                           #Randomly Selected Predictors
305                                                                                    none
319                                                                    Confidence Threshold
320                                                                                 Pruning
339                                                                        peeling quantile
340                                                                        pasting quantile
341                                                                            minimum mass
330                                                                              L1 Penalty
331                                                                 #Discriminant Functions
345                                                                               parameter
367                                                                               # Factors
393                                                           #Randomly Selected Predictors
394                                                                    Regularization Value
395                                                                  Importance Coefficient
396                                                           #Randomly Selected Predictors
397                                                                    Regularization Value
401                                                                               parameter
356                                                           #Randomly Selected Predictors
357                                                                       Minimal Node Size
406                                                                                  Radius
407                                                                         Upper Threshold
408                                                                         Lower Threshold
414                                                                            #Fuzzy Terms
415                                                                         Max. Iterations
416                                                                        Max. Generations
476                                                                            #Fuzzy Terms
477                                                                     Membership Function
1                                                                                    #Trees
2                                                                            Max Tree Depth
3                                                                             Learning Rate
9                                                                                    #Trees
10                                                                                   Method
11                                                                               Model Type
14                                                                            #Hidden Units
15                                                                             Weight Decay
16                                                                                  Bagging
17                                                                      Smoothing Parameter
18                                                                           Score Function
19                                                                      Smoothing Parameter
20                                                            #Randomly Selected Predictors
21                                                                                   #Terms
22                                                                           Product Degree
23                                                                           Product Degree
24                                                                           Product Degree
25                                                                                   #Terms
26                                                                           Product Degree
27                                                                        Feature Selection
28                                                                                   Method
29                                                                                   #Trees
30                                                                           Prior Boundary
31                                                        Base Terminal Node Hyperparameter
32                                                       Power Terminal Node Hyperparameter
33                                                                       Degrees of Freedom
34                                                                                parameter
35                                                                      Shrinkage Intensity
36                                                                                   #Trees
37                                                                           Max Tree Depth
38                                                                       Sparsity Threshold
39                                                                                parameter
40                                                                                parameter
41                                                                                # Neurons
44                                                                    # Boosting Iterations
45                                                                                Shrinkage
46                                                                    # Boosting Iterations
47                                                                           Max Tree Depth
48                                                                                Shrinkage
58                                                            #Randomly Selected Predictors
59                                                                        Merging Threshold
60                                                        Splitting former Merged Threshold
61  \n                                                    Splitting former Merged Threshold
63                                                                    1 - P-Value Threshold
64                                                                           Max Tree Depth
65                                                                    1 - P-Value Threshold
66                                                                              #Committees
67                                                                               #Instances
68                                                                                    Model
69                                                                           Shrinkage Type
70                                                                    # Boosting Iterations
71                                                                               Tree Depth
72                                                                        L1 Regularization
73                                                                Tree Depth Regularization
74                                                                                     Loss
77                                                                           Hidden Layer 1
78                                                                           Hidden Layer 2
79                                                                           Hidden Layer 3
80                                                                          Hidden Dropouts
81                                                                          Visible Dropout
82                                                                 Regularization Parameter
83                                                                                        q
84                                                                 Regularization Parameter
85                                                                                        q
86                                                                        Polynomial Degree
87                                                                                    Scale
88                                                                 Regularization Parameter
89                                                                                        q
90                                                                                    Sigma
91                                                                                   #Terms
92                                                                           Product Degree
93                                                                            #Hidden Units
94                                                                      Activation Function
95                                                                Fraction of Full Solution
96                                                                             Weight Decay
97                                                                     Complexity Parameter
98                                                           # Randomly Selected Predictors
99                                                                            # Random Cuts
100                                                                          Product Degree
101                                                                                  #Terms
107                                                                     #Variables Retained
108                                                                              L2 Penalty
115                                                                       Feature Selection
116                                                                                  Method
119                                                                                    Span
120                                                                                  Degree
121                                                                      Degrees of Freedom
117                                                                   # Boosting Iterations
118                                                                              AIC Prune?
122                                                                               Parameter
123                                                                       Polynomial Degree
124                                                                                   Scale
125                                                                                   Sigma
131                                                                   # Boosting Iterations
132                                                                          Max Tree Depth
133                                                                               Shrinkage
134                                                                 Min. Terminal Node Size
126                                                                   # Boosting Iterations
127                                                                          Max Tree Depth
128                                                                 Min. Terminal Node Size
129                                                                               Shrinkage
130                                                           #Randomly Selected Predictors
135                                                                          Product Degree
146                                                                               parameter
145                                                                           Link Function
153                                                                               parameter
147                                                                   # Boosting Iterations
148                                                                              AIC Prune?
151                                                                       Mixing Percentage
152                                                                Regularization Parameter
149                                                                       Mixing Percentage
150                                                                Regularization Parameter
154                                                                             #Components
155                                                                                   Gamma
156                                                                                  Lambda
157                                                Dimension of the Discriminative Subspace
158                                                                               Threshold
159                                                                              Model Type
160                                                                                   Gamma
161                                                                                  Lambda
162                                                                          Shrinkage Type
165                                                                             #Components
171                                                                             #Components
172                                                                         Max. #Neighbors
173                                                                                Distance
174                                                                                  Kernel
175                                                                              #Neighbors
176                                                                Regularization Parameter
177                                                                       Polynomial Degree
178                                                                Regularization Parameter
179                                                                                   Sigma
180                                                                                Fraction
181                                                                                  #Steps
182                                                               Fraction of Full Solution
183                                                                               parameter
184                                                                 #Discriminant Functions
185                                                            Maximum Number of Predictors
186                                                            Maximum Number of Predictors
187                                                            Maximum Number of Predictors
189                                                                               intercept
190                                                                               parameter
192                                                                      #Nearest Neighbors
193                                                                Maximum Number of Leaves
194                                                                         Number of Trees
196                                                                Maximum Number of Leaves
197                                                                         Number of Trees
198                                                                Regularization Parameter
199                                                                       Polynomial Degree
200                                                                                   Scale
201                                                                Regularization Parameter
202                                                                                   Sigma
203                                                                Regularization Parameter
204                                                                           Codebook Size
205                                                                             #Prototypes
211                                                                     Smoothing Parameter
212                                                                       Prior Probability
213                                                                   #Subclasses Per Class
215                                                                           #Hidden Units
216                                                                           #Hidden Units
217                                                                       L2 Regularization
218                                                                              Batch Size
219                                                                           Learning Rate
220                                                                                     Rho
221                                                                     Learning Rate Decay
222                                                                     Activation Function
223                                                                           #Hidden Units
224                                                                       L2 Regularization
225                                                                              Batch Size
226                                                                           Learning Rate
227                                                                                     Rho
228                                                                     Learning Rate Decay
229                                                                                    Cost
230                                                                     Activation Function
231                                                                           #Hidden Units
232                                                                            Dropout Rate
233                                                                              Batch Size
234                                                                           Learning Rate
235                                                                                     Rho
236                                                                     Learning Rate Decay
237                                                                     Activation Function
238                                                                           #Hidden Units
239                                                                            Dropout Rate
240                                                                              Batch Size
241                                                                           Learning Rate
242                                                                                     Rho
243                                                                     Learning Rate Decay
244                                                                                    Cost
245                                                                     Activation Function
246                                                                    #Hidden Units layer1
247                                                                    #Hidden Units layer2
248                                                                    #Hidden Units layer3
249                                                                           #Hidden Units
250                                                                       L2 Regularization
251                                                                   RMSE Gradient Scaling
252                                                                           Learning Rate
253                                                                                Momentum
254                                                                     Learning Rate Decay
255                                                                              Batch Size
256                                                                                 #Models
257                                                                           #Hidden Units
258                                                                            Weight Decay
259                                                                    #Hidden Units layer1
260                                                                    #Hidden Units layer2
261                                                                    #Hidden Units layer3
262                                                                            Weight Decay
263                                                                           #Hidden Units
264                                                                                 #Models
265                                                                                   Alpha
266                                                              #Adaptive Estimation Steps
267                                                          Adaptive Weight Scaling Factor
268                                                                            Weight Decay
269                                                                #Hidden Units in Layer 1
270                                                                #Hidden Units in Layer 2
271                                                                #Hidden Units in Layer 3
272                                                                           Learning Rate
273                                                                                Momentum
274                                                                            Dropout Rate
275                                                                     Activation Function
276                                                                #Hidden Units in Layer 1
277                                                                #Hidden Units in Layer 2
278                                                                #Hidden Units in Layer 3
279                                                                            Dropout Rate
280                                                                                   beta1
281                                                                                   beta2
282                                                                           Learning Rate
283                                                                     Activation Function
284                                                                      Laplace Correction
285                                                                       Distribution Type
286                                                                    Bandwidth Adjustment
287                                                                      Laplace Correction
288                                                                       Distribution Type
289                                                                    Bandwidth Adjustment
290                                                                     Smoothing Parameter
291                                                                                  #Folds
292                                                            Minimum Absolute Improvement
293                                                                     Smoothing Parameter
294                                                               Final Smoothing Parameter
295                                                                        Search Direction
296                                                                #Hidden Units in Layer 1
297                                                                #Hidden Units in Layer 2
298                                                                #Hidden Units in Layer 3
299                                                                           #Hidden Units
300                                                                            Weight Decay
301                                                                               parameter
302                                                               Maximum Interaction Depth
303                                                                         Prediction Mode
304                                                                               parameter
306                                                                       Mixing Percentage
307                                                                     Selection Criterion
308                                                                           Link Function
309                                           # score sets tried prior to the approximation
310                                                                  # of trees (small RFs)
311                                                                   # of trees (final RF)
316                                                                              #Neighbors
317                                                                     Shrinkage Threshold
318                                                           #Randomly Selected Predictors
321                                                           Number of Terminal Partitions
322                                                              Minimum Percent Difference
323                                                                           #Hidden Units
324                                                                            Weight Decay
325                                                                             #Components
326                                                           Shrinkage Penalty Coefficient
327                                                                      Degrees of Freedom
328                                                                              L1 Penalty
329                                                                              L2 Penalty
332                                                                              L2 Penalty
333                                                                    Complexity Parameter
334                                                                             #Components
335                                                                         #PLS Components
336                                                                       p-Value threshold
337                                                                               parameter
338                                                                                 # Terms
342                                                                               Ball Size
343                                                                          Distance Order
344                                                                               parameter
346                                                           #Randomly Selected Predictors
347                                                                           #Hidden Units
348                                                                            Weight Decay
349                                                                          Bagged Models?
366                                                                              Fern Depth
350                                                                       Interaction Order
351                                                           #Randomly Selected Predictors
352                                                                          Splitting Rule
353                                                                       Minimal Node Size
354                                                                           #Hidden Units
355                                                Activation Limit for Conflicting Classes
358                                                                                   Gamma
359                                                                                  Lambda
360                                                                                    Cost
361                                                                           Loss Function
362                                                                               Tolerance
363                                                                       Penalty Parameter
364                                                                    Relaxation Parameter
365                                                           #Randomly Selected Predictors
368                                                           #Randomly Selected Predictors
369                                                                      Maximum Rule Depth
370                                                                            Weight Decay
371                                                                   Regularization Method
372                                                                               intercept
373                                                                                     psi
374                                                                   #Subclasses Per Class
375                                                                                   Model
376                                                                     #Variables Retained
377                                                                       #Variable Subsets
378                                                                           Ensemble Size
379                                                                       #Variable Subsets
380                                                                           Ensemble Size
381                                                                    Complexity Parameter
382                                                                    Complexity Parameter
383                                                                               parameter
384                                                                          Max Tree Depth
385                                                                    Complexity Parameter
386                                                                                    Cost
387                                                                    Complexity Parameter
388                                                                          Split Function
389                                                                         Pruning Measure
390                                                                              L1 Penalty
391                                                                              L1 Penalty
392                                                                            Penalty Type
398                                                                       Penalty Parameter
399                                                                    Robustness Parameter
400                                                                            Penalty Type
402                                                                               parameter
403                                                                                   Scale
404                                                                       Polynomial Degree
405                                                                                   Sigma
409                                                                             Diagonalize
410                                                                               shrinkage
411                                                                              L1 Penalty
412                                                                              L2 Penalty
413                                                                             #Components
417                                                                                    none
418                                                                            # Predictors
419                                                                                  Lambda
420                                                                            # Subclasses
421                                                                 Stabilization Parameter
422                                                                            # Predictors
423                                                                                  Lambda
424                                                                      Variables Retained
425                                                                             #Components
426                                                                               Threshold
427                                                                                   Kappa
428                                                                      Maximum #Variables
429                                                                        Search Direction
430                                                                      Maximum #Variables
431                                                                        Search Direction
432                                                                               Threshold
433                                                                             #Components
434                                                                                  length
435                                                                                    Cost
436                                                                                  lambda
437                                                                                    Cost
438                                                                                    Cost
439                                                                                    Cost
440                                                                                    Cost
441                                                                           Loss Function
442                                                                                    Cost
443                                                                            Class Weight
444                                                                                    Cost
445                                                                           Loss Function
446                                                                            Class Weight
447                                                                       Polynomial Degree
448                                                                                   Scale
449                                                                                    Cost
450                                                                                   Sigma
451                                                                                    Cost
452                                                                                    Cost
453                                                                                   Sigma
454                                                                                    Cost
455                                                                                   Sigma
456                                                                                    Cost
457                                                                                  Weight
458                                                                                  length
459                                                                                    Cost
460                                                                          Score Function
461                                                                     Smoothing Parameter
462                                                                                  #Folds
463                                                            Minimum Absolute Improvement
464                                                                     Smoothing Parameter
465                                                               Final Smoothing Parameter
466                                                                            Super-Parent
467                                                                               parameter
468                                                                         Theta Estimated
469                                                                         Parallel Curves
470                                                                           Link Function
471                                                                         Parallel Curves
472                                                                           Link Function
473                                                                         Parallel Curves
474                                                                           Link Function
475                                                                             #Components
478                                                           #Randomly Selected Predictors
479                                                                   # Boosting Iterations
480                                                                          Max Tree Depth
481                                                                               Shrinkage
482                                                                  Minimum Loss Reduction
483                                                                    Subsample Percentage
484                                                              Subsample Ratio of Columns
485                                                               Fraction of Trees Dropped
486                                                              Prob. of Skipping Drop-out
487                                                          Minimum Sum of Instance Weight
488                                                                   # Boosting Iterations
489                                                                       L2 Regularization
490                                                                       L1 Regularization
491                                                                           Learning Rate
492                                                                   # Boosting Iterations
493                                                                          Max Tree Depth
494                                                                               Shrinkage
495                                                                  Minimum Loss Reduction
496                                                              Subsample Ratio of Columns
497                                                          Minimum Sum of Instance Weight
498                                                                    Subsample Percentage
499                                                                                    Rows
500                                                                                 Columns
501                                                                            Layer Weight
502                                                                                Topology
    forReg forClass probModel
12    TRUE    FALSE     FALSE
13    TRUE    FALSE     FALSE
4    FALSE     TRUE      TRUE
5    FALSE     TRUE      TRUE
6    FALSE     TRUE      TRUE
7    FALSE     TRUE      TRUE
8    FALSE     TRUE      TRUE
42    TRUE     TRUE     FALSE
43    TRUE     TRUE     FALSE
49   FALSE     TRUE      TRUE
50   FALSE     TRUE      TRUE
51   FALSE     TRUE      TRUE
52   FALSE     TRUE     FALSE
53   FALSE     TRUE     FALSE
54   FALSE     TRUE     FALSE
55   FALSE     TRUE     FALSE
56   FALSE     TRUE      TRUE
57   FALSE     TRUE      TRUE
62   FALSE     TRUE     FALSE
75    TRUE    FALSE     FALSE
76    TRUE    FALSE     FALSE
102  FALSE     TRUE     FALSE
103  FALSE     TRUE     FALSE
104  FALSE     TRUE     FALSE
105   TRUE    FALSE     FALSE
106   TRUE    FALSE     FALSE
109  FALSE     TRUE     FALSE
110  FALSE     TRUE     FALSE
111  FALSE     TRUE     FALSE
112  FALSE     TRUE     FALSE
113   TRUE    FALSE     FALSE
114   TRUE    FALSE     FALSE
136   TRUE    FALSE     FALSE
137   TRUE    FALSE     FALSE
138   TRUE    FALSE     FALSE
139   TRUE    FALSE     FALSE
140   TRUE    FALSE     FALSE
141   TRUE    FALSE     FALSE
142   TRUE    FALSE     FALSE
143   TRUE    FALSE     FALSE
144   TRUE    FALSE     FALSE
163   TRUE    FALSE     FALSE
164   TRUE    FALSE     FALSE
166  FALSE     TRUE      TRUE
167  FALSE     TRUE      TRUE
168  FALSE     TRUE      TRUE
169  FALSE     TRUE      TRUE
170  FALSE     TRUE      TRUE
191  FALSE     TRUE      TRUE
188  FALSE     TRUE      TRUE
195  FALSE     TRUE      TRUE
206   TRUE    FALSE     FALSE
207   TRUE    FALSE     FALSE
208   TRUE    FALSE     FALSE
209   TRUE    FALSE     FALSE
210   TRUE    FALSE     FALSE
214  FALSE     TRUE     FALSE
312  FALSE     TRUE      TRUE
313  FALSE     TRUE      TRUE
314  FALSE     TRUE      TRUE
315  FALSE     TRUE      TRUE
305  FALSE     TRUE      TRUE
319  FALSE     TRUE      TRUE
320  FALSE     TRUE      TRUE
339  FALSE     TRUE      TRUE
340  FALSE     TRUE      TRUE
341  FALSE     TRUE      TRUE
330  FALSE     TRUE     FALSE
331  FALSE     TRUE     FALSE
345  FALSE     TRUE      TRUE
367  FALSE     TRUE     FALSE
393   TRUE     TRUE      TRUE
394   TRUE     TRUE      TRUE
395   TRUE     TRUE      TRUE
396   TRUE     TRUE      TRUE
397   TRUE     TRUE      TRUE
401  FALSE     TRUE     FALSE
356   TRUE     TRUE      TRUE
357   TRUE     TRUE      TRUE
406   TRUE    FALSE     FALSE
407   TRUE    FALSE     FALSE
408   TRUE    FALSE     FALSE
414  FALSE     TRUE     FALSE
415  FALSE     TRUE     FALSE
416  FALSE     TRUE     FALSE
476   TRUE    FALSE     FALSE
477   TRUE    FALSE     FALSE
1    FALSE     TRUE      TRUE
2    FALSE     TRUE      TRUE
3    FALSE     TRUE      TRUE
9    FALSE     TRUE      TRUE
10   FALSE     TRUE      TRUE
11   FALSE     TRUE      TRUE
14    TRUE     TRUE      TRUE
15    TRUE     TRUE      TRUE
16    TRUE     TRUE      TRUE
17   FALSE     TRUE      TRUE
18   FALSE     TRUE      TRUE
19   FALSE     TRUE      TRUE
20    TRUE     TRUE      TRUE
21    TRUE     TRUE      TRUE
22    TRUE     TRUE      TRUE
23    TRUE     TRUE      TRUE
24   FALSE     TRUE      TRUE
25   FALSE     TRUE      TRUE
26   FALSE     TRUE      TRUE
27    TRUE     TRUE      TRUE
28    TRUE     TRUE      TRUE
29    TRUE     TRUE      TRUE
30    TRUE     TRUE      TRUE
31    TRUE     TRUE      TRUE
32    TRUE     TRUE      TRUE
33    TRUE     TRUE      TRUE
34    TRUE     TRUE      TRUE
35   FALSE     TRUE      TRUE
36    TRUE     TRUE      TRUE
37    TRUE     TRUE      TRUE
38    TRUE    FALSE     FALSE
39    TRUE    FALSE     FALSE
40    TRUE    FALSE     FALSE
41    TRUE    FALSE     FALSE
44    TRUE     TRUE     FALSE
45    TRUE     TRUE     FALSE
46    TRUE     TRUE     FALSE
47    TRUE     TRUE     FALSE
48    TRUE     TRUE     FALSE
58    TRUE     TRUE      TRUE
59   FALSE     TRUE      TRUE
60   FALSE     TRUE      TRUE
61   FALSE     TRUE      TRUE
63    TRUE     TRUE      TRUE
64    TRUE     TRUE      TRUE
65    TRUE     TRUE      TRUE
66    TRUE    FALSE     FALSE
67    TRUE    FALSE     FALSE
68   FALSE     TRUE      TRUE
69   FALSE     TRUE      TRUE
70   FALSE     TRUE     FALSE
71   FALSE     TRUE     FALSE
72   FALSE     TRUE     FALSE
73   FALSE     TRUE     FALSE
74   FALSE     TRUE     FALSE
77    TRUE     TRUE      TRUE
78    TRUE     TRUE      TRUE
79    TRUE     TRUE      TRUE
80    TRUE     TRUE      TRUE
81    TRUE     TRUE      TRUE
82   FALSE     TRUE      TRUE
83   FALSE     TRUE      TRUE
84   FALSE     TRUE      TRUE
85   FALSE     TRUE      TRUE
86   FALSE     TRUE      TRUE
87   FALSE     TRUE      TRUE
88   FALSE     TRUE      TRUE
89   FALSE     TRUE      TRUE
90   FALSE     TRUE      TRUE
91    TRUE     TRUE      TRUE
92    TRUE     TRUE      TRUE
93    TRUE     TRUE     FALSE
94    TRUE     TRUE     FALSE
95    TRUE    FALSE     FALSE
96    TRUE    FALSE     FALSE
97    TRUE     TRUE      TRUE
98    TRUE     TRUE      TRUE
99    TRUE     TRUE      TRUE
100  FALSE     TRUE      TRUE
101  FALSE     TRUE      TRUE
107   TRUE    FALSE     FALSE
108   TRUE    FALSE     FALSE
115   TRUE     TRUE      TRUE
116   TRUE     TRUE      TRUE
119   TRUE     TRUE      TRUE
120   TRUE     TRUE      TRUE
121   TRUE     TRUE      TRUE
117   TRUE     TRUE      TRUE
118   TRUE     TRUE      TRUE
122   TRUE     TRUE      TRUE
123   TRUE     TRUE      TRUE
124   TRUE     TRUE      TRUE
125   TRUE     TRUE      TRUE
131   TRUE     TRUE      TRUE
132   TRUE     TRUE      TRUE
133   TRUE     TRUE      TRUE
134   TRUE     TRUE      TRUE
126   TRUE     TRUE      TRUE
127   TRUE     TRUE      TRUE
128   TRUE     TRUE      TRUE
129   TRUE     TRUE      TRUE
130   TRUE     TRUE      TRUE
135   TRUE     TRUE      TRUE
146   TRUE     TRUE      TRUE
145   TRUE    FALSE     FALSE
153   TRUE     TRUE      TRUE
147   TRUE     TRUE      TRUE
148   TRUE     TRUE      TRUE
151   TRUE     TRUE      TRUE
152   TRUE     TRUE      TRUE
149   TRUE     TRUE      TRUE
150   TRUE     TRUE      TRUE
154  FALSE     TRUE      TRUE
155  FALSE     TRUE      TRUE
156  FALSE     TRUE      TRUE
157  FALSE     TRUE      TRUE
158  FALSE     TRUE      TRUE
159  FALSE     TRUE      TRUE
160  FALSE     TRUE      TRUE
161  FALSE     TRUE      TRUE
162  FALSE     TRUE      TRUE
165   TRUE    FALSE     FALSE
171   TRUE     TRUE      TRUE
172   TRUE     TRUE      TRUE
173   TRUE     TRUE      TRUE
174   TRUE     TRUE      TRUE
175   TRUE     TRUE      TRUE
176   TRUE    FALSE     FALSE
177   TRUE    FALSE     FALSE
178   TRUE    FALSE     FALSE
179   TRUE    FALSE     FALSE
180   TRUE    FALSE     FALSE
181   TRUE    FALSE     FALSE
182   TRUE    FALSE     FALSE
183  FALSE     TRUE      TRUE
184  FALSE     TRUE      TRUE
185   TRUE    FALSE     FALSE
186   TRUE    FALSE     FALSE
187   TRUE    FALSE     FALSE
189   TRUE    FALSE     FALSE
190   TRUE    FALSE     FALSE
192  FALSE     TRUE      TRUE
193   TRUE     TRUE      TRUE
194   TRUE     TRUE      TRUE
196   TRUE     TRUE      TRUE
197   TRUE     TRUE      TRUE
198  FALSE     TRUE     FALSE
199  FALSE     TRUE     FALSE
200  FALSE     TRUE     FALSE
201  FALSE     TRUE     FALSE
202  FALSE     TRUE     FALSE
203  FALSE     TRUE     FALSE
204  FALSE     TRUE     FALSE
205  FALSE     TRUE     FALSE
211  FALSE     TRUE      TRUE
212  FALSE     TRUE      TRUE
213  FALSE     TRUE      TRUE
215   TRUE     TRUE      TRUE
216   TRUE     TRUE      TRUE
217   TRUE     TRUE      TRUE
218   TRUE     TRUE      TRUE
219   TRUE     TRUE      TRUE
220   TRUE     TRUE      TRUE
221   TRUE     TRUE      TRUE
222   TRUE     TRUE      TRUE
223  FALSE     TRUE      TRUE
224  FALSE     TRUE      TRUE
225  FALSE     TRUE      TRUE
226  FALSE     TRUE      TRUE
227  FALSE     TRUE      TRUE
228  FALSE     TRUE      TRUE
229  FALSE     TRUE      TRUE
230  FALSE     TRUE      TRUE
231   TRUE     TRUE      TRUE
232   TRUE     TRUE      TRUE
233   TRUE     TRUE      TRUE
234   TRUE     TRUE      TRUE
235   TRUE     TRUE      TRUE
236   TRUE     TRUE      TRUE
237   TRUE     TRUE      TRUE
238  FALSE     TRUE      TRUE
239  FALSE     TRUE      TRUE
240  FALSE     TRUE      TRUE
241  FALSE     TRUE      TRUE
242  FALSE     TRUE      TRUE
243  FALSE     TRUE      TRUE
244  FALSE     TRUE      TRUE
245  FALSE     TRUE      TRUE
246   TRUE     TRUE      TRUE
247   TRUE     TRUE      TRUE
248   TRUE     TRUE      TRUE
249   TRUE     TRUE      TRUE
250   TRUE     TRUE      TRUE
251   TRUE     TRUE      TRUE
252   TRUE     TRUE      TRUE
253   TRUE     TRUE      TRUE
254   TRUE     TRUE      TRUE
255   TRUE     TRUE      TRUE
256   TRUE     TRUE      TRUE
257   TRUE     TRUE      TRUE
258   TRUE     TRUE      TRUE
259   TRUE     TRUE      TRUE
260   TRUE     TRUE      TRUE
261   TRUE     TRUE      TRUE
262   TRUE     TRUE      TRUE
263   TRUE     TRUE      TRUE
264   TRUE     TRUE      TRUE
265   TRUE     TRUE      TRUE
266   TRUE     TRUE      TRUE
267   TRUE     TRUE      TRUE
268  FALSE     TRUE      TRUE
269   TRUE     TRUE      TRUE
270   TRUE     TRUE      TRUE
271   TRUE     TRUE      TRUE
272   TRUE     TRUE      TRUE
273   TRUE     TRUE      TRUE
274   TRUE     TRUE      TRUE
275   TRUE     TRUE      TRUE
276   TRUE     TRUE      TRUE
277   TRUE     TRUE      TRUE
278   TRUE     TRUE      TRUE
279   TRUE     TRUE      TRUE
280   TRUE     TRUE      TRUE
281   TRUE     TRUE      TRUE
282   TRUE     TRUE      TRUE
283   TRUE     TRUE      TRUE
284  FALSE     TRUE      TRUE
285  FALSE     TRUE      TRUE
286  FALSE     TRUE      TRUE
287  FALSE     TRUE      TRUE
288  FALSE     TRUE      TRUE
289  FALSE     TRUE      TRUE
290  FALSE     TRUE      TRUE
291  FALSE     TRUE      TRUE
292  FALSE     TRUE      TRUE
293  FALSE     TRUE      TRUE
294  FALSE     TRUE      TRUE
295  FALSE     TRUE      TRUE
296   TRUE    FALSE     FALSE
297   TRUE    FALSE     FALSE
298   TRUE    FALSE     FALSE
299   TRUE     TRUE      TRUE
300   TRUE     TRUE      TRUE
301   TRUE    FALSE     FALSE
302   TRUE     TRUE      TRUE
303   TRUE     TRUE      TRUE
304   TRUE     TRUE      TRUE
306  FALSE     TRUE      TRUE
307  FALSE     TRUE      TRUE
308  FALSE     TRUE      TRUE
309  FALSE     TRUE      TRUE
310  FALSE     TRUE      TRUE
311  FALSE     TRUE      TRUE
316  FALSE     TRUE     FALSE
317  FALSE     TRUE      TRUE
318   TRUE     TRUE      TRUE
321   TRUE     TRUE     FALSE
322   TRUE     TRUE     FALSE
323   TRUE     TRUE      TRUE
324   TRUE     TRUE      TRUE
325   TRUE    FALSE     FALSE
326  FALSE     TRUE      TRUE
327  FALSE     TRUE      TRUE
328   TRUE    FALSE     FALSE
329   TRUE    FALSE     FALSE
332  FALSE     TRUE      TRUE
333  FALSE     TRUE      TRUE
334   TRUE     TRUE      TRUE
335   TRUE     TRUE      TRUE
336   TRUE     TRUE      TRUE
337  FALSE     TRUE      TRUE
338   TRUE    FALSE     FALSE
342  FALSE     TRUE     FALSE
343  FALSE     TRUE     FALSE
344  FALSE     TRUE      TRUE
346   TRUE    FALSE     FALSE
347   TRUE    FALSE     FALSE
348   TRUE    FALSE     FALSE
349   TRUE    FALSE     FALSE
366  FALSE     TRUE     FALSE
350   TRUE     TRUE      TRUE
351   TRUE     TRUE      TRUE
352   TRUE     TRUE      TRUE
353   TRUE     TRUE      TRUE
354   TRUE     TRUE      TRUE
355   TRUE     TRUE      TRUE
358  FALSE     TRUE      TRUE
359  FALSE     TRUE      TRUE
360  FALSE     TRUE      TRUE
361  FALSE     TRUE      TRUE
362  FALSE     TRUE      TRUE
363   TRUE    FALSE     FALSE
364   TRUE    FALSE     FALSE
365   TRUE     TRUE      TRUE
368   TRUE     TRUE     FALSE
369   TRUE     TRUE     FALSE
370   TRUE    FALSE     FALSE
371  FALSE     TRUE      TRUE
372   TRUE    FALSE     FALSE
373   TRUE    FALSE     FALSE
374  FALSE     TRUE      TRUE
375  FALSE     TRUE      TRUE
376  FALSE     TRUE     FALSE
377  FALSE     TRUE      TRUE
378  FALSE     TRUE      TRUE
379  FALSE     TRUE      TRUE
380  FALSE     TRUE      TRUE
381  FALSE     TRUE      TRUE
382   TRUE     TRUE      TRUE
383   TRUE     TRUE      TRUE
384   TRUE     TRUE      TRUE
385  FALSE     TRUE     FALSE
386  FALSE     TRUE     FALSE
387  FALSE     TRUE     FALSE
388  FALSE     TRUE     FALSE
389  FALSE     TRUE     FALSE
390   TRUE    FALSE     FALSE
391   TRUE    FALSE     FALSE
392   TRUE    FALSE     FALSE
398  FALSE     TRUE      TRUE
399  FALSE     TRUE      TRUE
400  FALSE     TRUE      TRUE
402   TRUE    FALSE     FALSE
403   TRUE    FALSE     FALSE
404   TRUE    FALSE     FALSE
405   TRUE    FALSE     FALSE
409  FALSE     TRUE      TRUE
410  FALSE     TRUE      TRUE
411  FALSE     TRUE      TRUE
412  FALSE     TRUE      TRUE
413   TRUE     TRUE      TRUE
417  FALSE     TRUE      TRUE
418  FALSE     TRUE     FALSE
419  FALSE     TRUE     FALSE
420  FALSE     TRUE     FALSE
421  FALSE     TRUE     FALSE
422  FALSE     TRUE      TRUE
423  FALSE     TRUE      TRUE
424   TRUE    FALSE     FALSE
425   TRUE     TRUE      TRUE
426   TRUE     TRUE      TRUE
427   TRUE     TRUE      TRUE
428  FALSE     TRUE      TRUE
429  FALSE     TRUE      TRUE
430  FALSE     TRUE      TRUE
431  FALSE     TRUE      TRUE
432   TRUE    FALSE     FALSE
433   TRUE    FALSE     FALSE
434   TRUE     TRUE      TRUE
435   TRUE     TRUE      TRUE
436   TRUE     TRUE      TRUE
437   TRUE     TRUE      TRUE
438   TRUE     TRUE      TRUE
439   TRUE     TRUE      TRUE
440   TRUE     TRUE     FALSE
441   TRUE     TRUE     FALSE
442  FALSE     TRUE      TRUE
443  FALSE     TRUE      TRUE
444  FALSE     TRUE     FALSE
445  FALSE     TRUE     FALSE
446  FALSE     TRUE     FALSE
447   TRUE     TRUE      TRUE
448   TRUE     TRUE      TRUE
449   TRUE     TRUE      TRUE
450   TRUE     TRUE      TRUE
451   TRUE     TRUE      TRUE
452   TRUE     TRUE      TRUE
453   TRUE     TRUE      TRUE
454   TRUE     TRUE      TRUE
455  FALSE     TRUE      TRUE
456  FALSE     TRUE      TRUE
457  FALSE     TRUE      TRUE
458   TRUE     TRUE      TRUE
459   TRUE     TRUE      TRUE
460  FALSE     TRUE      TRUE
461  FALSE     TRUE      TRUE
462  FALSE     TRUE      TRUE
463  FALSE     TRUE      TRUE
464  FALSE     TRUE      TRUE
465  FALSE     TRUE      TRUE
466  FALSE     TRUE      TRUE
467   TRUE     TRUE      TRUE
468  FALSE     TRUE      TRUE
469  FALSE     TRUE      TRUE
470  FALSE     TRUE      TRUE
471  FALSE     TRUE      TRUE
472  FALSE     TRUE      TRUE
473  FALSE     TRUE      TRUE
474  FALSE     TRUE      TRUE
475   TRUE     TRUE      TRUE
478  FALSE     TRUE      TRUE
479   TRUE     TRUE      TRUE
480   TRUE     TRUE      TRUE
481   TRUE     TRUE      TRUE
482   TRUE     TRUE      TRUE
483   TRUE     TRUE      TRUE
484   TRUE     TRUE      TRUE
485   TRUE     TRUE      TRUE
486   TRUE     TRUE      TRUE
487   TRUE     TRUE      TRUE
488   TRUE     TRUE      TRUE
489   TRUE     TRUE      TRUE
490   TRUE     TRUE      TRUE
491   TRUE     TRUE      TRUE
492   TRUE     TRUE      TRUE
493   TRUE     TRUE      TRUE
494   TRUE     TRUE      TRUE
495   TRUE     TRUE      TRUE
496   TRUE     TRUE      TRUE
497   TRUE     TRUE      TRUE
498   TRUE     TRUE      TRUE
499   TRUE     TRUE      TRUE
500   TRUE     TRUE      TRUE
501   TRUE     TRUE      TRUE
502   TRUE     TRUE      TRUE
  model         parameter                   label forReg forClass probModel
1   gbm           n.trees   # Boosting Iterations   TRUE     TRUE      TRUE
2   gbm interaction.depth          Max Tree Depth   TRUE     TRUE      TRUE
3   gbm         shrinkage               Shrinkage   TRUE     TRUE      TRUE
4   gbm    n.minobsinnode Min. Terminal Node Size   TRUE     TRUE      TRUE
$gpls
$gpls$label
[1] "Generalized Partial Least Squares"

$gpls$library
[1] "gpls"

$gpls$loop
NULL

$gpls$type
[1] "Classification"

$gpls$parameters
  parameter   class       label
1    K.prov numeric #Components

$gpls$grid
function (x, y, len = NULL, search = "grid") 
{
    if (search == "grid") {
        out <- data.frame(K.prov = seq(1, len))
    }
    else {
        out <- data.frame(K.prov = unique(sample(1:ncol(x), size = len, 
            replace = TRUE)))
    }
    out
}

$gpls$fit
function (x, y, wts, param, lev, last, classProbs, ...) 
gpls::gpls(x, y, K.prov = param$K.prov, ...)

$gpls$predict
function (modelFit, newdata, submodels = NULL) 
predict(modelFit, newdata)$class

$gpls$prob
function (modelFit, newdata, submodels = NULL) 
{
    out <- predict(modelFit, newdata)$predicted
    out <- cbind(out, 1 - out)
    colnames(out) <- modelFit$obsLevels
    out
}

$gpls$predictors
function (x, ...) 
{
    out <- if (hasTerms(x)) 
        predictors(x$terms)
    else colnames(x$data$x.order)
    out[!(out %in% "Intercept")]
}

$gpls$tags
[1] "Logistic Regression"   "Partial Least Squares" "Linear Classifier"    

$gpls$sort
function (x) 
x[order(x[, 1]), ]

$gpls$levels
function (x) 
x$obsLevels


$kernelpls
$kernelpls$label
[1] "Partial Least Squares"

$kernelpls$library
[1] "pls"

$kernelpls$type
[1] "Regression"     "Classification"

$kernelpls$parameters
  parameter   class       label
1     ncomp numeric #Components

$kernelpls$grid
function (x, y, len = NULL, search = "grid") 
{
    if (search == "grid") {
        out <- data.frame(ncomp = seq(1, min(ncol(x) - 1, len), 
            by = 1))
    }
    else {
        out <- data.frame(ncomp = unique(sample(1:ncol(x), size = len, 
            replace = TRUE)))
    }
    out
}

$kernelpls$loop
function (grid) 
{
    grid <- grid[order(grid$ncomp, decreasing = TRUE), , drop = FALSE]
    loop <- grid[1, , drop = FALSE]
    submodels <- list(grid[-1, , drop = FALSE])
    list(loop = loop, submodels = submodels)
}

$kernelpls$fit
function (x, y, wts, param, lev, last, classProbs, ...) 
{
    ncomp <- min(ncol(x), param$ncomp)
    out <- if (is.factor(y)) {
        caret::plsda(x, y, method = "kernelpls", ncomp = ncomp, 
            ...)
    }
    else {
        dat <- if (is.data.frame(x)) 
            x
        else as.data.frame(x)
        dat$.outcome <- y
        pls::plsr(.outcome ~ ., data = dat, method = "kernelpls", 
            ncomp = ncomp, ...)
    }
    out
}

$kernelpls$predict
function (modelFit, newdata, submodels = NULL) 
{
    out <- if (modelFit$problemType == "Classification") {
        if (!is.matrix(newdata)) 
            newdata <- as.matrix(newdata)
        out <- predict(modelFit, newdata, type = "class")
    }
    else as.vector(pls:::predict.mvr(modelFit, newdata, ncomp = max(modelFit$ncomp)))
    if (!is.null(submodels)) {
        tmp <- vector(mode = "list", length = nrow(submodels))
        if (modelFit$problemType == "Classification") {
            if (length(submodels$ncomp) > 1) {
                tmp <- as.list(predict(modelFit, newdata, ncomp = submodels$ncomp))
            }
            else tmp <- list(predict(modelFit, newdata, ncomp = submodels$ncomp))
        }
        else {
            tmp <- as.list(as.data.frame(apply(predict(modelFit, 
                newdata, ncomp = submodels$ncomp), 3, function(x) list(x))))
        }
        out <- c(list(out), tmp)
    }
    out
}

$kernelpls$prob
function (modelFit, newdata, submodels = NULL) 
{
    if (!is.matrix(newdata)) 
        newdata <- as.matrix(newdata)
    out <- predict(modelFit, newdata, type = "prob", ncomp = modelFit$tuneValue$ncomp)
    if (length(dim(out)) == 3) {
        if (dim(out)[1] > 1) {
            out <- out[, , 1]
        }
        else {
            out <- as.data.frame(t(out[, , 1]))
        }
    }
    if (!is.null(submodels)) {
        tmp <- vector(mode = "list", length = nrow(submodels) + 
            1)
        tmp[[1]] <- out
        for (j in seq(along = submodels$ncomp)) {
            tmpProb <- predict(modelFit, newdata, type = "prob", 
                ncomp = submodels$ncomp[j])
            if (length(dim(tmpProb)) == 3) {
                if (dim(tmpProb)[1] > 1) {
                  tmpProb <- tmpProb[, , 1]
                }
                else {
                  tmpProb <- as.data.frame(t(tmpProb[, , 1]))
                }
            }
            tmp[[j + 1]] <- as.data.frame(tmpProb[, modelFit$obsLevels, 
                drop = FALSE])
        }
        out <- tmp
    }
    out
}

$kernelpls$varImp
function (object, estimate = NULL, ...) 
{
    library(pls)
    modelCoef <- coef(object, intercept = FALSE, comps = 1:object$ncomp)
    perf <- MSEP(object)$val
    nms <- dimnames(perf)
    if (length(nms$estimate) > 1) {
        pIndex <- if (is.null(estimate)) 
            1
        else which(nms$estimate == estimate)
        perf <- perf[pIndex, , , drop = FALSE]
    }
    numResp <- dim(modelCoef)[2]
    if (numResp <= 2) {
        modelCoef <- modelCoef[, 1, , drop = FALSE]
        perf <- perf[, 1, ]
        delta <- -diff(perf)
        delta <- delta/sum(delta)
        out <- data.frame(Overall = apply(abs(modelCoef), 1, 
            weighted.mean, w = delta))
    }
    else {
        perf <- -t(apply(perf[1, , ], 1, diff))
        perf <- t(apply(perf, 1, function(u) u/sum(u)))
        out <- matrix(NA, ncol = numResp, nrow = dim(modelCoef)[1])
        for (i in 1:numResp) {
            tmp <- abs(modelCoef[, i, , drop = FALSE])
            out[, i] <- apply(tmp, 1, weighted.mean, w = perf[i, 
                ])
        }
        colnames(out) <- dimnames(modelCoef)[[2]]
        rownames(out) <- dimnames(modelCoef)[[1]]
    }
    as.data.frame(out)
}

$kernelpls$predictors
function (x, ...) 
rownames(x$projection)

$kernelpls$levels
function (x) 
x$obsLevels

$kernelpls$tags
[1] "Partial Least Squares" "Feature Extraction"    "Kernel Method"        
[4] "Linear Classifier"     "Linear Regression"    

$kernelpls$sort
function (x) 
x[order(x[, 1]), ]


$ORFpls
$ORFpls$label
[1] "Oblique Random Forest"

$ORFpls$library
[1] "obliqueRF"

$ORFpls$loop
NULL

$ORFpls$type
[1] "Classification"

$ORFpls$parameters
  parameter   class                         label
1      mtry numeric #Randomly Selected Predictors

$ORFpls$grid
function (x, y, len = NULL, search = "grid") 
{
    if (search == "grid") {
        out <- data.frame(mtry = caret::var_seq(p = ncol(x), 
            classification = is.factor(y), len = len))
    }
    else {
        out <- data.frame(mtry = unique(sample(1:ncol(x), size = len, 
            replace = TRUE)))
    }
    out
}

$ORFpls$fit
function (x, y, wts, param, lev, last, classProbs, ...) 
{
    require(obliqueRF)
    obliqueRF::obliqueRF(as.matrix(x), y, training_method = "pls", 
        ...)
}

$ORFpls$predict
function (modelFit, newdata, submodels = NULL) 
predict(modelFit, newdata)

$ORFpls$prob
function (modelFit, newdata, submodels = NULL) 
predict(modelFit, newdata, type = "prob")

$ORFpls$levels
function (x) 
x$obsLevels

$ORFpls$notes
[1] "Unlike other packages used by `train`, the `obliqueRF` package is fully loaded when this model is used."

$ORFpls$tags
[1] "Random Forest"              "Oblique Tree"              
[3] "Partial Least Squares"      "Implicit Feature Selection"
[5] "Ensemble Model"             "Two Class Only"            

$ORFpls$sort
function (x) 
x[order(x[, 1]), ]


$pls
$pls$label
[1] "Partial Least Squares"

$pls$library
[1] "pls"

$pls$type
[1] "Regression"     "Classification"

$pls$parameters
  parameter   class       label
1     ncomp numeric #Components

$pls$grid
function (x, y, len = NULL, search = "grid") 
{
    if (search == "grid") {
        out <- data.frame(ncomp = seq(1, min(ncol(x) - 1, len), 
            by = 1))
    }
    else {
        out <- data.frame(ncomp = unique(sample(1:ncol(x), replace = TRUE)))
    }
    out
}

$pls$loop
function (grid) 
{
    grid <- grid[order(grid$ncomp, decreasing = TRUE), , drop = FALSE]
    loop <- grid[1, , drop = FALSE]
    submodels <- list(grid[-1, , drop = FALSE])
    list(loop = loop, submodels = submodels)
}

$pls$fit
function (x, y, wts, param, lev, last, classProbs, ...) 
{
    ncomp <- min(ncol(x), param$ncomp)
    out <- if (is.factor(y)) {
        plsda(x, y, method = "oscorespls", ncomp = ncomp, ...)
    }
    else {
        dat <- if (is.data.frame(x)) 
            x
        else as.data.frame(x)
        dat$.outcome <- y
        pls::plsr(.outcome ~ ., data = dat, method = "oscorespls", 
            ncomp = ncomp, ...)
    }
    out
}

$pls$predict
function (modelFit, newdata, submodels = NULL) 
{
    out <- if (modelFit$problemType == "Classification") {
        if (!is.matrix(newdata)) 
            newdata <- as.matrix(newdata)
        out <- predict(modelFit, newdata, type = "class")
    }
    else as.vector(pls:::predict.mvr(modelFit, newdata, ncomp = max(modelFit$ncomp)))
    if (!is.null(submodels)) {
        tmp <- vector(mode = "list", length = nrow(submodels))
        if (modelFit$problemType == "Classification") {
            if (length(submodels$ncomp) > 1) {
                tmp <- as.list(predict(modelFit, newdata, ncomp = submodels$ncomp))
            }
            else tmp <- list(predict(modelFit, newdata, ncomp = submodels$ncomp))
        }
        else {
            tmp <- as.list(as.data.frame(apply(predict(modelFit, 
                newdata, ncomp = submodels$ncomp), 3, function(x) list(x))))
        }
        out <- c(list(out), tmp)
    }
    out
}

$pls$prob
function (modelFit, newdata, submodels = NULL) 
{
    if (!is.matrix(newdata)) 
        newdata <- as.matrix(newdata)
    out <- predict(modelFit, newdata, type = "prob", ncomp = modelFit$tuneValue$ncomp)
    if (length(dim(out)) == 3) {
        if (dim(out)[1] > 1) {
            out <- out[, , 1]
        }
        else {
            out <- as.data.frame(t(out[, , 1]))
        }
    }
    if (!is.null(submodels)) {
        tmp <- vector(mode = "list", length = nrow(submodels) + 
            1)
        tmp[[1]] <- out
        for (j in seq(along = submodels$ncomp)) {
            tmpProb <- predict(modelFit, newdata, type = "prob", 
                ncomp = submodels$ncomp[j])
            if (length(dim(tmpProb)) == 3) {
                if (dim(tmpProb)[1] > 1) {
                  tmpProb <- tmpProb[, , 1]
                }
                else {
                  tmpProb <- as.data.frame(t(tmpProb[, , 1]))
                }
            }
            tmp[[j + 1]] <- as.data.frame(tmpProb[, modelFit$obsLevels])
        }
        out <- tmp
    }
    out
}

$pls$varImp
function (object, estimate = NULL, ...) 
{
    library(pls)
    modelCoef <- coef(object, intercept = FALSE, comps = 1:object$ncomp)
    perf <- pls:::MSEP.mvr(object)$val
    nms <- dimnames(perf)
    if (length(nms$estimate) > 1) {
        pIndex <- if (is.null(estimate)) 
            1
        else which(nms$estimate == estimate)
        perf <- perf[pIndex, , , drop = FALSE]
    }
    numResp <- dim(modelCoef)[2]
    if (numResp <= 2) {
        modelCoef <- modelCoef[, 1, , drop = FALSE]
        perf <- perf[, 1, ]
        delta <- -diff(perf)
        delta <- delta/sum(delta)
        out <- data.frame(Overall = apply(abs(modelCoef), 1, 
            weighted.mean, w = delta))
    }
    else {
        perf <- -t(apply(perf[1, , ], 1, diff))
        perf <- t(apply(perf, 1, function(u) u/sum(u)))
        out <- matrix(NA, ncol = numResp, nrow = dim(modelCoef)[1])
        for (i in 1:numResp) {
            tmp <- abs(modelCoef[, i, , drop = FALSE])
            out[, i] <- apply(tmp, 1, weighted.mean, w = perf[i, 
                ])
        }
        colnames(out) <- dimnames(modelCoef)[[2]]
        rownames(out) <- dimnames(modelCoef)[[1]]
    }
    as.data.frame(out)
}

$pls$predictors
function (x, ...) 
rownames(x$projection)

$pls$levels
function (x) 
x$obsLevels

$pls$tags
[1] "Partial Least Squares" "Feature Extraction"    "Linear Classifier"    
[4] "Linear Regression"    

$pls$sort
function (x) 
x[order(x[, 1]), ]


$plsRglm
$plsRglm$label
[1] "Partial Least Squares Generalized Linear Models "

$plsRglm$library
[1] "plsRglm"

$plsRglm$loop
NULL

$plsRglm$type
[1] "Classification" "Regression"    

$plsRglm$parameters
          parameter   class             label
1                nt numeric   #PLS Components
2 alpha.pvals.expli numeric p-Value threshold

$plsRglm$grid
function (x, y, len = NULL, search = "grid") 
{
    if (search == "grid") {
        out <- expand.grid(nt = 1:len, alpha.pvals.expli = 10^(c(-2:(len - 
            3), 0)))
    }
    else {
        out <- data.frame(nt = sample(1:ncol(x), size = len, 
            replace = TRUE), alpha.pvals.expli = runif(len, min = 0, 
            0.2))
    }
    out
}

$plsRglm$fit
function (x, y, wts, param, lev, last, classProbs, ...) 
{
    require(plsRglm)
    if (is.factor(y)) {
        lv <- levels(y)
        y <- as.numeric(y) - 1
        dst <- "pls-glm-logistic"
    }
    else {
        lv <- NULL
        dst <- "pls-glm-gaussian"
    }
    theDots <- list(...)
    if (any(names(theDots) == "modele")) {
        mod <- plsrRglm::plsRglm(y, x, nt = param$nt, pvals.expli = param$alpha.pvals.expli < 
            1, sparse = param$alpha.pvals.expli < 1, alpha.pvals.expli = param$alpha.pvals.expli, 
            ...)
    }
    else {
        mod <- plsRglm::plsRglm(y, x, nt = param$nt, modele = dst, 
            pvals.expli = param$alpha.pvals.expli < 1, sparse = param$alpha.pvals.expli < 
                1, alpha.pvals.expli = param$alpha.pvals.expli, 
            ...)
    }
    mod
}

$plsRglm$predict
function (modelFit, newdata, submodels = NULL) 
{
    out <- predict(modelFit, newdata, type = "response")
    if (modelFit$problemType == "Classification") {
        out <- factor(ifelse(out >= 0.5, modelFit$obsLevels[2], 
            modelFit$obsLevels[1]))
    }
    out
}

$plsRglm$prob
function (modelFit, newdata, submodels = NULL) 
{
    out <- predict(modelFit, newdata, type = "response")
    out <- cbind(1 - out, out)
    dimnames(out)[[2]] <- rev(modelFit$obsLevels)
    out
}

$plsRglm$varImp
NULL

$plsRglm$predictors
function (x, ...) 
{
    vars <- names(which(coef(x)[[2]][, 1] != 0))
    vars[vars != "Intercept"]
}

$plsRglm$notes
[1] "Unlike other packages used by `train`, the `plsRglm` package is fully loaded when this model is used."

$plsRglm$tags
[1] "Generalized Linear Models" "Partial Least Squares"    
[3] "Two Class Only"           

$plsRglm$levels
function (x) 
x$lev

$plsRglm$sort
function (x) 
x[order(-x$alpha.pvals.expli, x$nt), ]


$simpls
$simpls$label
[1] "Partial Least Squares"

$simpls$library
[1] "pls"

$simpls$type
[1] "Regression"     "Classification"

$simpls$parameters
  parameter   class       label
1     ncomp numeric #Components

$simpls$grid
function (x, y, len = NULL, search = "grid") 
{
    if (search == "grid") {
        out <- data.frame(ncomp = seq(1, min(ncol(x) - 1, len), 
            by = 1))
    }
    else {
        out <- data.frame(ncomp = unique(sample(1:(ncol(x) - 
            1), size = len, replace = TRUE)))
    }
    out
}

$simpls$loop
function (grid) 
{
    grid <- grid[order(grid$ncomp, decreasing = TRUE), , drop = FALSE]
    loop <- grid[1, , drop = FALSE]
    submodels <- list(grid[-1, , drop = FALSE])
    list(loop = loop, submodels = submodels)
}

$simpls$fit
function (x, y, wts, param, lev, last, classProbs, ...) 
{
    ncomp <- min(ncol(x), param$ncomp)
    out <- if (is.factor(y)) {
        plsda(x, y, method = "simpls", ncomp = ncomp, ...)
    }
    else {
        dat <- if (is.data.frame(x)) 
            x
        else as.data.frame(x)
        dat$.outcome <- y
        pls::plsr(.outcome ~ ., data = dat, method = "simpls", 
            ncomp = ncomp, ...)
    }
    out
}

$simpls$predict
function (modelFit, newdata, submodels = NULL) 
{
    out <- if (modelFit$problemType == "Classification") {
        if (!is.matrix(newdata)) 
            newdata <- as.matrix(newdata)
        out <- predict(modelFit, newdata, type = "class")
    }
    else as.vector(pls:::predict.mvr(modelFit, newdata, ncomp = max(modelFit$ncomp)))
    if (!is.null(submodels)) {
        tmp <- vector(mode = "list", length = nrow(submodels))
        if (modelFit$problemType == "Classification") {
            if (length(submodels$ncomp) > 1) {
                tmp <- as.list(predict(modelFit, newdata, ncomp = submodels$ncomp))
            }
            else tmp <- list(predict(modelFit, newdata, ncomp = submodels$ncomp))
        }
        else {
            tmp <- as.list(as.data.frame(apply(predict(modelFit, 
                newdata, ncomp = submodels$ncomp), 3, function(x) list(x))))
        }
        out <- c(list(out), tmp)
    }
    out
}

$simpls$prob
function (modelFit, newdata, submodels = NULL) 
{
    if (!is.matrix(newdata)) 
        newdata <- as.matrix(newdata)
    out <- predict(modelFit, newdata, type = "prob", ncomp = modelFit$tuneValue$ncomp)
    if (length(dim(out)) == 3) {
        if (dim(out)[1] > 1) {
            out <- out[, , 1]
        }
        else {
            out <- as.data.frame(t(out[, , 1]))
        }
    }
    if (!is.null(submodels)) {
        tmp <- vector(mode = "list", length = nrow(submodels) + 
            1)
        tmp[[1]] <- out
        for (j in seq(along = submodels$ncomp)) {
            tmpProb <- predict(modelFit, newdata, type = "prob", 
                ncomp = submodels$ncomp[j])
            if (length(dim(tmpProb)) == 3) {
                if (dim(tmpProb)[1] > 1) {
                  tmpProb <- tmpProb[, , 1]
                }
                else {
                  tmpProb <- as.data.frame(t(tmpProb[, , 1]))
                }
            }
            tmp[[j + 1]] <- as.data.frame(tmpProb[, modelFit$obsLevels, 
                drop = FALSE])
        }
        out <- tmp
    }
    out
}

$simpls$varImp
function (object, estimate = NULL, ...) 
{
    library(pls)
    modelCoef <- coef(object, intercept = FALSE, comps = 1:object$ncomp)
    perf <- pls:::MSEP.mvr(object)$val
    nms <- dimnames(perf)
    if (length(nms$estimate) > 1) {
        pIndex <- if (is.null(estimate)) 
            1
        else which(nms$estimate == estimate)
        perf <- perf[pIndex, , , drop = FALSE]
    }
    numResp <- dim(modelCoef)[2]
    if (numResp <= 2) {
        modelCoef <- modelCoef[, 1, , drop = FALSE]
        perf <- perf[, 1, ]
        delta <- -diff(perf)
        delta <- delta/sum(delta)
        out <- data.frame(Overall = apply(abs(modelCoef), 1, 
            weighted.mean, w = delta))
    }
    else {
        perf <- -t(apply(perf[1, , ], 1, diff))
        perf <- t(apply(perf, 1, function(u) u/sum(u)))
        out <- matrix(NA, ncol = numResp, nrow = dim(modelCoef)[1])
        for (i in 1:numResp) {
            tmp <- abs(modelCoef[, i, , drop = FALSE])
            out[, i] <- apply(tmp, 1, weighted.mean, w = perf[i, 
                ])
        }
        colnames(out) <- dimnames(modelCoef)[[2]]
        rownames(out) <- dimnames(modelCoef)[[1]]
    }
    as.data.frame(out)
}

$simpls$levels
function (x) 
x$obsLevels

$simpls$predictors
function (x, ...) 
rownames(x$projection)

$simpls$tags
[1] "Partial Least Squares" "Feature Extraction"    "Linear Classifier"    
[4] "Linear Regression"    

$simpls$sort
function (x) 
x[order(x[, 1]), ]


$spls
$spls$label
[1] "Sparse Partial Least Squares"

$spls$library
[1] "spls"

$spls$type
[1] "Regression"     "Classification"

$spls$parameters
  parameter   class       label
1         K numeric #Components
2       eta numeric   Threshold
3     kappa numeric       Kappa

$spls$grid
function (x, y, len = NULL, search = "grid") 
{
    if (search == "grid") {
        out <- expand.grid(K = 1:min(nrow(x), ncol(x)), eta = seq(0.1, 
            0.9, length = len), kappa = 0.5)
    }
    else {
        out <- data.frame(kappa = runif(len, min = 0, max = 0.5), 
            eta = runif(len, min = 0, max = 1), K = sample(1:min(nrow(x), 
                ncol(x)), size = len, replace = TRUE))
    }
    out
}

$spls$loop
NULL

$spls$fit
function (x, y, wts, param, lev, last, classProbs, ...) 
{
    param$K <- min(param$K, length(y))
    if (is.factor(y)) {
        caret:::splsda(x, y, K = param$K, eta = param$eta, kappa = param$kappa, 
            ...)
    }
    else {
        spls::spls(x, y, K = param$K, eta = param$eta, kappa = param$kappa, 
            ...)
    }
}

$spls$predict
function (modelFit, newdata, submodels = NULL) 
{
    if (length(modelFit$obsLevels) < 2) {
        spls::predict.spls(modelFit, newdata)
    }
    else {
        as.character(caret:::predict.splsda(modelFit, newdata, 
            type = "class"))
    }
}

$spls$prob
function (modelFit, newdata, submodels = NULL) 
{
    if (!is.matrix(newdata)) 
        newdata <- as.matrix(newdata)
    caret:::predict.splsda(modelFit, newdata, type = "prob")
}

$spls$predictors
function (x, ...) 
colnames(x$x)[x$A]

$spls$tags
[1] "Partial Least Squares" "Feature Extraction"    "Linear Classifier"    
[4] "Linear Regression"     "L1 Regularization"    

$spls$levels
function (x) 
x$obsLevels

$spls$sort
function (x) 
x[order(-x$eta, x$K), ]


$widekernelpls
$widekernelpls$label
[1] "Partial Least Squares"

$widekernelpls$library
[1] "pls"

$widekernelpls$type
[1] "Regression"     "Classification"

$widekernelpls$parameters
  parameter   class       label
1     ncomp numeric #Components

$widekernelpls$grid
function (x, y, len = NULL, search = "grid") 
{
    if (search == "grid") {
        out <- data.frame(ncomp = seq(1, min(ncol(x) - 1, len), 
            by = 1))
    }
    else {
        out <- data.frame(ncomp = unique(sample(1:(ncol(x) - 
            1), size = len, replace = TRUE)))
    }
    out
}

$widekernelpls$loop
function (grid) 
{
    grid <- grid[order(grid$ncomp, decreasing = TRUE), , drop = FALSE]
    loop <- grid[1, , drop = FALSE]
    submodels <- list(grid[-1, , drop = FALSE])
    list(loop = loop, submodels = submodels)
}

$widekernelpls$fit
function (x, y, wts, param, lev, last, classProbs, ...) 
{
    ncomp <- min(ncol(x), param$ncomp)
    out <- if (is.factor(y)) {
        caret::plsda(x, y, method = "widekernelpls", ncomp = ncomp, 
            ...)
    }
    else {
        dat <- if (is.data.frame(x)) 
            x
        else as.data.frame(x)
        dat$.outcome <- y
        pls::plsr(.outcome ~ ., data = dat, method = "widekernelpls", 
            ncomp = ncomp, ...)
    }
    out
}

$widekernelpls$predict
function (modelFit, newdata, submodels = NULL) 
{
    out <- if (modelFit$problemType == "Classification") {
        if (!is.matrix(newdata)) 
            newdata <- as.matrix(newdata)
        out <- predict(modelFit, newdata, type = "class")
    }
    else as.vector(pls:::predict.mvr(modelFit, newdata, ncomp = max(modelFit$ncomp)))
    if (!is.null(submodels)) {
        tmp <- vector(mode = "list", length = nrow(submodels))
        if (modelFit$problemType == "Classification") {
            if (length(submodels$ncomp) > 1) {
                tmp <- as.list(predict(modelFit, newdata, ncomp = submodels$ncomp))
            }
            else tmp <- list(predict(modelFit, newdata, ncomp = submodels$ncomp))
        }
        else {
            tmp <- as.list(as.data.frame(apply(predict(modelFit, 
                newdata, ncomp = submodels$ncomp), 3, function(x) list(x))))
        }
        out <- c(list(out), tmp)
    }
    out
}

$widekernelpls$prob
function (modelFit, newdata, submodels = NULL) 
{
    if (!is.matrix(newdata)) 
        newdata <- as.matrix(newdata)
    out <- predict(modelFit, newdata, type = "prob", ncomp = modelFit$tuneValue$ncomp)
    if (length(dim(out)) == 3) {
        if (dim(out)[1] > 1) {
            out <- out[, , 1]
        }
        else {
            out <- as.data.frame(t(out[, , 1]))
        }
    }
    if (!is.null(submodels)) {
        tmp <- vector(mode = "list", length = nrow(submodels) + 
            1)
        tmp[[1]] <- out
        for (j in seq(along = submodels$ncomp)) {
            tmpProb <- predict(modelFit, newdata, type = "prob", 
                ncomp = submodels$ncomp[j])
            if (length(dim(tmpProb)) == 3) {
                if (dim(tmpProb)[1] > 1) {
                  tmpProb <- tmpProb[, , 1]
                }
                else {
                  tmpProb <- as.data.frame(t(tmpProb[, , 1]))
                }
            }
            tmp[[j + 1]] <- as.data.frame(tmpProb[, modelFit$obsLevels, 
                drop = FALSE])
        }
        out <- tmp
    }
    out
}

$widekernelpls$predictors
function (x, ...) 
rownames(x$projection)

$widekernelpls$varImp
function (object, estimate = NULL, ...) 
{
    library(pls)
    modelCoef <- coef(object, intercept = FALSE, comps = 1:object$ncomp)
    perf <- pls:::MSEP.mvr(object)$val
    nms <- dimnames(perf)
    if (length(nms$estimate) > 1) {
        pIndex <- if (is.null(estimate)) 
            1
        else which(nms$estimate == estimate)
        perf <- perf[pIndex, , , drop = FALSE]
    }
    numResp <- dim(modelCoef)[2]
    if (numResp <= 2) {
        modelCoef <- modelCoef[, 1, , drop = FALSE]
        perf <- perf[, 1, ]
        delta <- -diff(perf)
        delta <- delta/sum(delta)
        out <- data.frame(Overall = apply(abs(modelCoef), 1, 
            weighted.mean, w = delta))
    }
    else {
        perf <- -t(apply(perf[1, , ], 1, diff))
        perf <- t(apply(perf, 1, function(u) u/sum(u)))
        out <- matrix(NA, ncol = numResp, nrow = dim(modelCoef)[1])
        for (i in 1:numResp) {
            tmp <- abs(modelCoef[, i, , drop = FALSE])
            out[, i] <- apply(tmp, 1, weighted.mean, w = perf[i, 
                ])
        }
        colnames(out) <- dimnames(modelCoef)[[2]]
        rownames(out) <- dimnames(modelCoef)[[1]]
    }
    as.data.frame(out)
}

$widekernelpls$levels
function (x) 
x$obsLevels

$widekernelpls$tags
[1] "Partial Least Squares" "Feature Extraction"    "Linear Classifier"    
[4] "Linear Regression"    

$widekernelpls$sort
function (x) 
x[order(x[, 1]), ]


$pls
$pls$label
[1] "Partial Least Squares"

$pls$library
[1] "pls"

$pls$type
[1] "Regression"     "Classification"

$pls$parameters
  parameter   class       label
1     ncomp numeric #Components

$pls$grid
function (x, y, len = NULL, search = "grid") 
{
    if (search == "grid") {
        out <- data.frame(ncomp = seq(1, min(ncol(x) - 1, len), 
            by = 1))
    }
    else {
        out <- data.frame(ncomp = unique(sample(1:ncol(x), replace = TRUE)))
    }
    out
}

$pls$loop
function (grid) 
{
    grid <- grid[order(grid$ncomp, decreasing = TRUE), , drop = FALSE]
    loop <- grid[1, , drop = FALSE]
    submodels <- list(grid[-1, , drop = FALSE])
    list(loop = loop, submodels = submodels)
}

$pls$fit
function (x, y, wts, param, lev, last, classProbs, ...) 
{
    ncomp <- min(ncol(x), param$ncomp)
    out <- if (is.factor(y)) {
        plsda(x, y, method = "oscorespls", ncomp = ncomp, ...)
    }
    else {
        dat <- if (is.data.frame(x)) 
            x
        else as.data.frame(x)
        dat$.outcome <- y
        pls::plsr(.outcome ~ ., data = dat, method = "oscorespls", 
            ncomp = ncomp, ...)
    }
    out
}

$pls$predict
function (modelFit, newdata, submodels = NULL) 
{
    out <- if (modelFit$problemType == "Classification") {
        if (!is.matrix(newdata)) 
            newdata <- as.matrix(newdata)
        out <- predict(modelFit, newdata, type = "class")
    }
    else as.vector(pls:::predict.mvr(modelFit, newdata, ncomp = max(modelFit$ncomp)))
    if (!is.null(submodels)) {
        tmp <- vector(mode = "list", length = nrow(submodels))
        if (modelFit$problemType == "Classification") {
            if (length(submodels$ncomp) > 1) {
                tmp <- as.list(predict(modelFit, newdata, ncomp = submodels$ncomp))
            }
            else tmp <- list(predict(modelFit, newdata, ncomp = submodels$ncomp))
        }
        else {
            tmp <- as.list(as.data.frame(apply(predict(modelFit, 
                newdata, ncomp = submodels$ncomp), 3, function(x) list(x))))
        }
        out <- c(list(out), tmp)
    }
    out
}

$pls$prob
function (modelFit, newdata, submodels = NULL) 
{
    if (!is.matrix(newdata)) 
        newdata <- as.matrix(newdata)
    out <- predict(modelFit, newdata, type = "prob", ncomp = modelFit$tuneValue$ncomp)
    if (length(dim(out)) == 3) {
        if (dim(out)[1] > 1) {
            out <- out[, , 1]
        }
        else {
            out <- as.data.frame(t(out[, , 1]))
        }
    }
    if (!is.null(submodels)) {
        tmp <- vector(mode = "list", length = nrow(submodels) + 
            1)
        tmp[[1]] <- out
        for (j in seq(along = submodels$ncomp)) {
            tmpProb <- predict(modelFit, newdata, type = "prob", 
                ncomp = submodels$ncomp[j])
            if (length(dim(tmpProb)) == 3) {
                if (dim(tmpProb)[1] > 1) {
                  tmpProb <- tmpProb[, , 1]
                }
                else {
                  tmpProb <- as.data.frame(t(tmpProb[, , 1]))
                }
            }
            tmp[[j + 1]] <- as.data.frame(tmpProb[, modelFit$obsLevels])
        }
        out <- tmp
    }
    out
}

$pls$varImp
function (object, estimate = NULL, ...) 
{
    library(pls)
    modelCoef <- coef(object, intercept = FALSE, comps = 1:object$ncomp)
    perf <- pls:::MSEP.mvr(object)$val
    nms <- dimnames(perf)
    if (length(nms$estimate) > 1) {
        pIndex <- if (is.null(estimate)) 
            1
        else which(nms$estimate == estimate)
        perf <- perf[pIndex, , , drop = FALSE]
    }
    numResp <- dim(modelCoef)[2]
    if (numResp <= 2) {
        modelCoef <- modelCoef[, 1, , drop = FALSE]
        perf <- perf[, 1, ]
        delta <- -diff(perf)
        delta <- delta/sum(delta)
        out <- data.frame(Overall = apply(abs(modelCoef), 1, 
            weighted.mean, w = delta))
    }
    else {
        perf <- -t(apply(perf[1, , ], 1, diff))
        perf <- t(apply(perf, 1, function(u) u/sum(u)))
        out <- matrix(NA, ncol = numResp, nrow = dim(modelCoef)[1])
        for (i in 1:numResp) {
            tmp <- abs(modelCoef[, i, , drop = FALSE])
            out[, i] <- apply(tmp, 1, weighted.mean, w = perf[i, 
                ])
        }
        colnames(out) <- dimnames(modelCoef)[[2]]
        rownames(out) <- dimnames(modelCoef)[[1]]
    }
    as.data.frame(out)
}

$pls$predictors
function (x, ...) 
rownames(x$projection)

$pls$levels
function (x) 
x$obsLevels

$pls$tags
[1] "Partial Least Squares" "Feature Extraction"    "Linear Classifier"    
[4] "Linear Regression"    

$pls$sort
function (x) 
x[order(x[, 1]), ]


$plsRglm
$plsRglm$label
[1] "Partial Least Squares Generalized Linear Models "

$plsRglm$library
[1] "plsRglm"

$plsRglm$loop
NULL

$plsRglm$type
[1] "Classification" "Regression"    

$plsRglm$parameters
          parameter   class             label
1                nt numeric   #PLS Components
2 alpha.pvals.expli numeric p-Value threshold

$plsRglm$grid
function (x, y, len = NULL, search = "grid") 
{
    if (search == "grid") {
        out <- expand.grid(nt = 1:len, alpha.pvals.expli = 10^(c(-2:(len - 
            3), 0)))
    }
    else {
        out <- data.frame(nt = sample(1:ncol(x), size = len, 
            replace = TRUE), alpha.pvals.expli = runif(len, min = 0, 
            0.2))
    }
    out
}

$plsRglm$fit
function (x, y, wts, param, lev, last, classProbs, ...) 
{
    require(plsRglm)
    if (is.factor(y)) {
        lv <- levels(y)
        y <- as.numeric(y) - 1
        dst <- "pls-glm-logistic"
    }
    else {
        lv <- NULL
        dst <- "pls-glm-gaussian"
    }
    theDots <- list(...)
    if (any(names(theDots) == "modele")) {
        mod <- plsrRglm::plsRglm(y, x, nt = param$nt, pvals.expli = param$alpha.pvals.expli < 
            1, sparse = param$alpha.pvals.expli < 1, alpha.pvals.expli = param$alpha.pvals.expli, 
            ...)
    }
    else {
        mod <- plsRglm::plsRglm(y, x, nt = param$nt, modele = dst, 
            pvals.expli = param$alpha.pvals.expli < 1, sparse = param$alpha.pvals.expli < 
                1, alpha.pvals.expli = param$alpha.pvals.expli, 
            ...)
    }
    mod
}

$plsRglm$predict
function (modelFit, newdata, submodels = NULL) 
{
    out <- predict(modelFit, newdata, type = "response")
    if (modelFit$problemType == "Classification") {
        out <- factor(ifelse(out >= 0.5, modelFit$obsLevels[2], 
            modelFit$obsLevels[1]))
    }
    out
}

$plsRglm$prob
function (modelFit, newdata, submodels = NULL) 
{
    out <- predict(modelFit, newdata, type = "response")
    out <- cbind(1 - out, out)
    dimnames(out)[[2]] <- rev(modelFit$obsLevels)
    out
}

$plsRglm$varImp
NULL

$plsRglm$predictors
function (x, ...) 
{
    vars <- names(which(coef(x)[[2]][, 1] != 0))
    vars[vars != "Intercept"]
}

$plsRglm$notes
[1] "Unlike other packages used by `train`, the `plsRglm` package is fully loaded when this model is used."

$plsRglm$tags
[1] "Generalized Linear Models" "Partial Least Squares"    
[3] "Two Class Only"           

$plsRglm$levels
function (x) 
x$lev

$plsRglm$sort
function (x) 
x[order(-x$alpha.pvals.expli, x$nt), ]


$pls
$pls$label
[1] "Partial Least Squares"

$pls$library
[1] "pls"

$pls$type
[1] "Regression"     "Classification"

$pls$parameters
  parameter   class       label
1     ncomp numeric #Components

$pls$grid
function (x, y, len = NULL, search = "grid") 
{
    if (search == "grid") {
        out <- data.frame(ncomp = seq(1, min(ncol(x) - 1, len), 
            by = 1))
    }
    else {
        out <- data.frame(ncomp = unique(sample(1:ncol(x), replace = TRUE)))
    }
    out
}

$pls$loop
function (grid) 
{
    grid <- grid[order(grid$ncomp, decreasing = TRUE), , drop = FALSE]
    loop <- grid[1, , drop = FALSE]
    submodels <- list(grid[-1, , drop = FALSE])
    list(loop = loop, submodels = submodels)
}

$pls$fit
function (x, y, wts, param, lev, last, classProbs, ...) 
{
    ncomp <- min(ncol(x), param$ncomp)
    out <- if (is.factor(y)) {
        plsda(x, y, method = "oscorespls", ncomp = ncomp, ...)
    }
    else {
        dat <- if (is.data.frame(x)) 
            x
        else as.data.frame(x)
        dat$.outcome <- y
        pls::plsr(.outcome ~ ., data = dat, method = "oscorespls", 
            ncomp = ncomp, ...)
    }
    out
}

$pls$predict
function (modelFit, newdata, submodels = NULL) 
{
    out <- if (modelFit$problemType == "Classification") {
        if (!is.matrix(newdata)) 
            newdata <- as.matrix(newdata)
        out <- predict(modelFit, newdata, type = "class")
    }
    else as.vector(pls:::predict.mvr(modelFit, newdata, ncomp = max(modelFit$ncomp)))
    if (!is.null(submodels)) {
        tmp <- vector(mode = "list", length = nrow(submodels))
        if (modelFit$problemType == "Classification") {
            if (length(submodels$ncomp) > 1) {
                tmp <- as.list(predict(modelFit, newdata, ncomp = submodels$ncomp))
            }
            else tmp <- list(predict(modelFit, newdata, ncomp = submodels$ncomp))
        }
        else {
            tmp <- as.list(as.data.frame(apply(predict(modelFit, 
                newdata, ncomp = submodels$ncomp), 3, function(x) list(x))))
        }
        out <- c(list(out), tmp)
    }
    out
}

$pls$prob
function (modelFit, newdata, submodels = NULL) 
{
    if (!is.matrix(newdata)) 
        newdata <- as.matrix(newdata)
    out <- predict(modelFit, newdata, type = "prob", ncomp = modelFit$tuneValue$ncomp)
    if (length(dim(out)) == 3) {
        if (dim(out)[1] > 1) {
            out <- out[, , 1]
        }
        else {
            out <- as.data.frame(t(out[, , 1]))
        }
    }
    if (!is.null(submodels)) {
        tmp <- vector(mode = "list", length = nrow(submodels) + 
            1)
        tmp[[1]] <- out
        for (j in seq(along = submodels$ncomp)) {
            tmpProb <- predict(modelFit, newdata, type = "prob", 
                ncomp = submodels$ncomp[j])
            if (length(dim(tmpProb)) == 3) {
                if (dim(tmpProb)[1] > 1) {
                  tmpProb <- tmpProb[, , 1]
                }
                else {
                  tmpProb <- as.data.frame(t(tmpProb[, , 1]))
                }
            }
            tmp[[j + 1]] <- as.data.frame(tmpProb[, modelFit$obsLevels])
        }
        out <- tmp
    }
    out
}

$pls$varImp
function (object, estimate = NULL, ...) 
{
    library(pls)
    modelCoef <- coef(object, intercept = FALSE, comps = 1:object$ncomp)
    perf <- pls:::MSEP.mvr(object)$val
    nms <- dimnames(perf)
    if (length(nms$estimate) > 1) {
        pIndex <- if (is.null(estimate)) 
            1
        else which(nms$estimate == estimate)
        perf <- perf[pIndex, , , drop = FALSE]
    }
    numResp <- dim(modelCoef)[2]
    if (numResp <= 2) {
        modelCoef <- modelCoef[, 1, , drop = FALSE]
        perf <- perf[, 1, ]
        delta <- -diff(perf)
        delta <- delta/sum(delta)
        out <- data.frame(Overall = apply(abs(modelCoef), 1, 
            weighted.mean, w = delta))
    }
    else {
        perf <- -t(apply(perf[1, , ], 1, diff))
        perf <- t(apply(perf, 1, function(u) u/sum(u)))
        out <- matrix(NA, ncol = numResp, nrow = dim(modelCoef)[1])
        for (i in 1:numResp) {
            tmp <- abs(modelCoef[, i, , drop = FALSE])
            out[, i] <- apply(tmp, 1, weighted.mean, w = perf[i, 
                ])
        }
        colnames(out) <- dimnames(modelCoef)[[2]]
        rownames(out) <- dimnames(modelCoef)[[1]]
    }
    as.data.frame(out)
}

$pls$predictors
function (x, ...) 
rownames(x$projection)

$pls$levels
function (x) 
x$obsLevels

$pls$tags
[1] "Partial Least Squares" "Feature Extraction"    "Linear Classifier"    
[4] "Linear Regression"    

$pls$sort
function (x) 
x[order(x[, 1]), ]

caret documentation built on May 2, 2019, 5:47 p.m.

Related to modelLookup in caret...