Fit a generalized additive classification model using a boosting algorithm. Calls mboost::gamboost() from package mboost.

Dictionary

This Learner can be instantiated via lrn():

lrn("classif.gamboost")

Meta Information

  • Task type: “classif”

  • Predict Types: “response”, “prob”

  • Feature Types: “integer”, “numeric”, “factor”, “ordered”

  • Required Packages: mlr3, mlr3extralearners, mboost

Parameters

Id             Type       Default         Levels                         Range
baselearner    character  bbs             bbs, bols, btree               -
dfbase         integer    4               -                              \((-\infty, \infty)\)
offset         numeric    NULL            -                              \((-\infty, \infty)\)
family         character  Binomial        Binomial, AdaExp, AUC, custom  -
custom.family  untyped    -               -                              -
link           character  logit           logit, probit                  -
type           character  adaboost        glm, adaboost                  -
mstop          integer    100             -                              \((-\infty, \infty)\)
nu             numeric    0.1             -                              \((-\infty, \infty)\)
risk           character  inbag           inbag, oobag, none             -
oobweights     untyped    NULL            -                              -
trace          logical    FALSE           TRUE, FALSE                    -
stopintern     untyped    FALSE           -                              -
na.action      untyped    stats::na.omit  -                              -
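
Parameters from the table can also be updated on an existing learner through its param_set. A minimal sketch; the values are chosen from the table above for illustration only:

# Switch to linear base-learners and the AdaExp loss family
learner = mlr3::lrn("classif.gamboost")
learner$param_set$set_values(baselearner = "bols", family = "AdaExp")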

References

Bühlmann, Peter, Yu, Bin (2003). “Boosting with the L2 loss: regression and classification.” Journal of the American Statistical Association, 98(462), 324–339.

Author

be-marc

Super classes

mlr3::Learner -> mlr3::LearnerClassif -> LearnerClassifGAMBoost

Methods

Method new()

Create a LearnerClassifGAMBoost object.

Usage

LearnerClassifGAMBoost$new()


Method clone()

The objects of this class are cloneable with this method.

Usage

LearnerClassifGAMBoost$clone(deep = FALSE)

Arguments

deep

Whether to make a deep clone.
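
For example, a deep clone yields an independent copy whose settings can be changed without affecting the original. A minimal sketch:

learner = mlr3::lrn("classif.gamboost")
learner2 = learner$clone(deep = TRUE)
learner2$param_set$set_values(mstop = 200)  # `learner` is unaffected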

Examples

# Define the Learner
learner = mlr3::lrn("classif.gamboost")
print(learner)
#> <LearnerClassifGAMBoost:classif.gamboost>: Boosted Generalized Additive Model
#> * Model: -
#> * Parameters: list()
#> * Packages: mlr3, mlr3extralearners, mboost
#> * Predict Types:  [response], prob
#> * Feature Types: integer, numeric, factor, ordered
#> * Properties: twoclass, weights

# Define a Task
task = mlr3::tsk("sonar")

# Create train and test set
ids = mlr3::partition(task)

# Train the learner on the training ids
learner$train(task, row_ids = ids$train)

print(learner$model)
#> 
#> 	 Model-based Boosting
#> 
#> Call:
#> mboost::gamboost(formula = f, data = data, family = new("boost_family_glm",     fW = function (f)     {        f <- pmin(abs(f), 36) * sign(f)        p <- exp(f)/(exp(f) + exp(-f))        4 * p * (1 - p)    }, ngradient = function (y, f, w = 1)     {        exp2yf <- exp(-2 * y * f)        -(-2 * y * exp2yf)/(log(2) * (1 + exp2yf))    }, risk = function (y, f, w = 1)     sum(w * loss(y, f), na.rm = TRUE), offset = function (y,         w)     {        p <- weighted.mean(y > 0, w)        1/2 * log(p/(1 - p))    }, check_y = function (y)     {        if (!is.factor(y))             stop("response is not a factor but ", sQuote("family = Binomial()"))        if (nlevels(y) != 2)             stop("response is not a factor at two levels but ",                 sQuote("family = Binomial()"))        return(c(-1, 1)[as.integer(y)])    }, weights = function (w)     {        switch(weights, any = TRUE, none = isTRUE(all.equal(unique(w),             1)), zeroone = isTRUE(all.equal(unique(w + abs(w -             1)), 1)), case = isTRUE(all.equal(unique(w - floor(w)),             0)))    }, nuisance = function ()     return(NA), response = function (f)     {        f <- pmin(abs(f), 36) * sign(f)        p <- exp(f)/(exp(f) + exp(-f))        return(p)    }, rclass = function (f)     (f > 0) + 1, name = "Negative Binomial Likelihood (logit link)",     charloss = c("{ \n", "    f <- pmin(abs(f), 36) * sign(f) \n",     "    p <- exp(f)/(exp(f) + exp(-f)) \n", "    y <- (y + 1)/2 \n",     "    -y * log(p) - (1 - y) * log(1 - p) \n", "} \n")), control = ctrl)
#> 
#> 
#> 	 Negative Binomial Likelihood (logit link) 
#> 
#> Loss function: { 
#>      f <- pmin(abs(f), 36) * sign(f) 
#>      p <- exp(f)/(exp(f) + exp(-f)) 
#>      y <- (y + 1)/2 
#>      -y * log(p) - (1 - y) * log(1 - p) 
#>  } 
#>  
#> 
#> Number of boosting iterations: mstop = 100 
#> Step size:  0.1 
#> Offset:  0.007194369 
#> Number of baselearners:  60 
#> 


# Make predictions for the test rows
predictions = learner$predict(task, row_ids = ids$test)
#> Warning: Some ‘x’ values are beyond ‘boundary.knots’; Linear extrapolation used.
#> Warning: Some ‘x’ values are beyond ‘boundary.knots’; Linear extrapolation used.
#> Warning: Some ‘x’ values are beyond ‘boundary.knots’; Linear extrapolation used.
#> Warning: Some ‘x’ values are beyond ‘boundary.knots’; Linear extrapolation used.
#> Warning: Some ‘x’ values are beyond ‘boundary.knots’; Linear extrapolation used.
#> Warning: Some ‘x’ values are beyond ‘boundary.knots’; Linear extrapolation used.
#> Warning: Some ‘x’ values are beyond ‘boundary.knots’; Linear extrapolation used.
#> Warning: Some ‘x’ values are beyond ‘boundary.knots’; Linear extrapolation used.
#> Warning: Some ‘x’ values are beyond ‘boundary.knots’; Linear extrapolation used.
#> Warning: Some ‘x’ values are beyond ‘boundary.knots’; Linear extrapolation used.

# Score the predictions
predictions$score()
#> classif.ce 
#>  0.1884058
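
Because the learner supports the "prob" predict type, probability-based measures such as the AUC can be scored as well. A minimal sketch reusing the task and split from above:

# Predict class probabilities instead of hard labels
learner$predict_type = "prob"
learner$train(task, row_ids = ids$train)
predictions = learner$predict(task, row_ids = ids$test)

# Score with the area under the ROC curve
predictions$score(mlr3::msr("classif.auc"))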