Skip to contents

Adaptive boosting algorithm for classification. Calls RWeka::AdaBoostM1() from package RWeka.

Dictionary

This Learner can be instantiated via lrn():

lrn("classif.AdaBoostM1")

Meta Information

  • Task type: “classif”

  • Predict Types: “response”, “prob”

  • Feature Types: “integer”, “numeric”, “factor”, “ordered”

  • Required Packages: mlr3, mlr3extralearners, RWeka

Parameters

Id                          Type      Default          Levels        Range
subset                      untyped   -                -
na.action                   untyped   -                -
P                           integer   100                            \([90, 100]\)
Q                           logical   FALSE            TRUE, FALSE   -
S                           integer   1                              \([1, \infty)\)
I                           integer   10                             \([1, \infty)\)
W                           untyped   "DecisionStump"                -
output_debug_info           logical   FALSE            TRUE, FALSE   -
do_not_check_capabilities   logical   FALSE            TRUE, FALSE   -
num_decimal_places          integer   2                              \([1, \infty)\)
batch_size                  integer   100                            \([1, \infty)\)
options                     untyped   NULL                           -

Parameter changes

  • output_debug_info:

    • original id: output-debug-info

  • do_not_check_capabilities:

    • original id: do-not-check-capabilities

  • num_decimal_places:

    • original id: num-decimal-places

  • batch_size:

    • original id: batch-size

  • Reason for change: This learner changes the ids of the control arguments listed above because their original ids contain an irregular pattern (dashes), which is not valid in R parameter names.

References

Freund, Yoav, Schapire, Robert E, others (1996). “Experiments with a new boosting algorithm.” In ICML, volume 96, 148–156. Citeseer.

See also

Author

henrifnk

Super classes

mlr3::Learner -> mlr3::LearnerClassif -> LearnerClassifAdaBoostM1

Methods

Inherited methods


Method new()

Creates a new instance of this R6 class.


Method clone()

The objects of this class are cloneable with this method.

Usage

LearnerClassifAdaBoostM1$clone(deep = FALSE)

Arguments

deep

Whether to make a deep clone.

Examples

# Define the Learner
learner = mlr3::lrn("classif.AdaBoostM1")
print(learner)
#> <LearnerClassifAdaBoostM1:classif.AdaBoostM1>: Adaptive Boosting
#> * Model: -
#> * Parameters: list()
#> * Packages: mlr3, mlr3extralearners, RWeka
#> * Predict Types:  [response], prob
#> * Feature Types: integer, numeric, factor, ordered
#> * Properties: multiclass, twoclass

# Define a Task
task = mlr3::tsk("sonar")

# Create train and test set
ids = mlr3::partition(task)

# Train the learner on the training ids
learner$train(task, row_ids = ids$train)

print(learner$model)
#> AdaBoostM1: Base classifiers and their weights: 
#> 
#> Decision Stump
#> 
#> Classifications
#> 
#> V11 <= 0.17070000000000002 : R
#> V11 > 0.17070000000000002 : M
#> V11 is missing : R
#> 
#> Class distributions
#> 
#> V11 <= 0.17070000000000002
#> M	R	
#> 0.12727272727272726	0.8727272727272727	
#> V11 > 0.17070000000000002
#> M	R	
#> 0.7380952380952381	0.2619047619047619	
#> V11 is missing
#> M	R	
#> 0.49640287769784175	0.5035971223021583	
#> 
#> 
#> Weight: 1.33
#> 
#> Decision Stump
#> 
#> Classifications
#> 
#> V36 <= 0.46475 : M
#> V36 > 0.46475 : R
#> V36 is missing : R
#> 
#> Class distributions
#> 
#> V36 <= 0.46475
#> M	R	
#> 0.5520373097692686	0.44796269023073143	
#> V36 > 0.46475
#> M	R	
#> 0.13833477883781428	0.8616652211621857	
#> V36 is missing
#> M	R	
#> 0.4025078369905959	0.5974921630094042	
#> 
#> 
#> Weight: 0.68
#> 
#> Decision Stump
#> 
#> Classifications
#> 
#> V45 <= 0.38545 : R
#> V45 > 0.38545 : M
#> V45 is missing : R
#> 
#> Class distributions
#> 
#> V45 <= 0.38545
#> M	R	
#> 0.2557859638254008	0.7442140361745991	
#> V45 > 0.38545
#> M	R	
#> 0.8963128382890337	0.1036871617109662	
#> V45 is missing
#> M	R	
#> 0.3398563573774193	0.6601436426225806	
#> 
#> 
#> Weight: 1.18
#> 
#> Decision Stump
#> 
#> Classifications
#> 
#> V11 <= 0.1117 : R
#> V11 > 0.1117 : M
#> V11 is missing : M
#> 
#> Class distributions
#> 
#> V11 <= 0.1117
#> M	R	
#> 0.0	1.0	
#> V11 > 0.1117
#> M	R	
#> 0.6177127230427156	0.3822872769572844	
#> V11 is missing
#> M	R	
#> 0.5481188931977876	0.45188110680221233	
#> 
#> 
#> Weight: 0.67
#> 
#> Decision Stump
#> 
#> Classifications
#> 
#> V21 <= 0.81445 : R
#> V21 > 0.81445 : M
#> V21 is missing : R
#> 
#> Class distributions
#> 
#> V21 <= 0.81445
#> M	R	
#> 0.2802042213569229	0.7197957786430771	
#> V21 > 0.81445
#> M	R	
#> 0.7393119305731614	0.26068806942683853	
#> V21 is missing
#> M	R	
#> 0.414749767920319	0.585250232079681	
#> 
#> 
#> Weight: 0.97
#> 
#> Decision Stump
#> 
#> Classifications
#> 
#> V13 <= 0.17670000000000002 : R
#> V13 > 0.17670000000000002 : M
#> V13 is missing : M
#> 
#> Class distributions
#> 
#> V13 <= 0.17670000000000002
#> M	R	
#> 0.14987114364134382	0.8501288563586562	
#> V13 > 0.17670000000000002
#> M	R	
#> 0.5998842960297699	0.4001157039702301	
#> V13 is missing
#> M	R	
#> 0.510151667468419	0.4898483325315811	
#> 
#> 
#> Weight: 0.62
#> 
#> Decision Stump
#> 
#> Classifications
#> 
#> V4 <= 0.04915 : R
#> V4 > 0.04915 : M
#> V4 is missing : R
#> 
#> Class distributions
#> 
#> V4 <= 0.04915
#> M	R	
#> 0.28162643946554067	0.7183735605344593	
#> V4 > 0.04915
#> M	R	
#> 0.6536359573606423	0.3463640426393577	
#> V4 is missing
#> M	R	
#> 0.4122251936073745	0.5877748063926255	
#> 
#> 
#> Weight: 0.83
#> 
#> Decision Stump
#> 
#> Classifications
#> 
#> V27 <= 0.71825 : R
#> V27 > 0.71825 : M
#> V27 is missing : R
#> 
#> Class distributions
#> 
#> V27 <= 0.71825
#> M	R	
#> 0.2779729071294684	0.7220270928705317	
#> V27 > 0.71825
#> M	R	
#> 0.7191849164493896	0.28081508355061036	
#> V27 is missing
#> M	R	
#> 0.46517049142384254	0.5348295085761575	
#> 
#> 
#> Weight: 0.95
#> 
#> Decision Stump
#> 
#> Classifications
#> 
#> V54 <= 0.0225 : R
#> V54 > 0.0225 : M
#> V54 is missing : R
#> 
#> Class distributions
#> 
#> V54 <= 0.0225
#> M	R	
#> 0.42865372888056585	0.571346271119434	
#> V54 > 0.0225
#> M	R	
#> 0.9664629607162379	0.03353703928376202	
#> V54 is missing
#> M	R	
#> 0.49827505587385607	0.5017249441261441	
#> 
#> 
#> Weight: 0.5
#> 
#> Decision Stump
#> 
#> Classifications
#> 
#> V23 <= 0.6987 : R
#> V23 > 0.6987 : M
#> V23 is missing : M
#> 
#> Class distributions
#> 
#> V23 <= 0.6987
#> M	R	
#> 0.41528907103125384	0.5847109289687461	
#> V23 > 0.6987
#> M	R	
#> 0.7991221642884683	0.20087783571153184	
#> V23 is missing
#> M	R	
#> 0.5947421046222063	0.4052578953777938	
#> 
#> 
#> Weight: 0.78
#> 
#> Number of performed Iterations: 10
#> 


# Make predictions for the test rows
predictions = learner$predict(task, row_ids = ids$test)

# Score the predictions
predictions$score()
#> classif.ce 
#>  0.2608696