
Adaptive boosting algorithm for classification. Calls RWeka::AdaBoostM1() from package RWeka.

Dictionary

This Learner can be instantiated via lrn():

lrn("classif.AdaBoostM1")

Meta Information

  • Task type: “classif”

  • Predict Types: “response”, “prob”

  • Feature Types: “integer”, “numeric”, “factor”, “ordered”

  • Required Packages: mlr3, mlr3extralearners, RWeka

Parameters

Id                         Type     Default          Levels       Range
subset                     untyped  -                             -
na.action                  untyped  -                             -
P                          integer  100                           [90, 100]
Q                          logical  FALSE            TRUE, FALSE  -
S                          integer  1                             [1, ∞)
I                          integer  10                            [1, ∞)
W                          untyped  "DecisionStump"               -
output_debug_info          logical  FALSE            TRUE, FALSE  -
do_not_check_capabilities  logical  FALSE            TRUE, FALSE  -
num_decimal_places         integer  2                             [1, ∞)
batch_size                 integer  100                           [1, ∞)
options                    untyped  NULL                          -
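
As a minimal sketch (the values below are purely illustrative), hyperparameters from the table can be passed to lrn() at construction time or changed later through the learner's parameter set:

# Illustrative settings: 50 boosting iterations (I) and a fixed random seed (S)
learner = mlr3::lrn("classif.AdaBoostM1", I = 50, S = 1)

# Parameters can also be updated after construction
learner$param_set$values$I = 100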

Parameter changes

  • output_debug_info:

    • original id: output-debug-info

  • do_not_check_capabilities:

    • original id: do-not-check-capabilities

  • num_decimal_places:

    • original id: num-decimal-places

  • batch_size:

    • original id: batch-size

  • Reason for change: The ids of these control arguments were renamed because their original ids contain hyphens, which do not conform to the naming pattern used for parameter ids.
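
As a short sketch of how the renamed ids are used (the value is chosen for illustration only), the underscore variants are what the mlr3 interface expects; the original hyphenated Weka ids are handled internally:

# Use the renamed id output_debug_info, not the original Weka id output-debug-info
learner = mlr3::lrn("classif.AdaBoostM1", output_debug_info = TRUE)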

References

Freund Y, Schapire RE (1996). “Experiments with a new boosting algorithm.” In Proceedings of the Thirteenth International Conference on Machine Learning (ICML 1996), 148–156.

Author

henrifnk

Super classes

mlr3::Learner -> mlr3::LearnerClassif -> LearnerClassifAdaBoostM1

Methods

Inherited methods


Method new()

Creates a new instance of this R6 class.


Method clone()

The objects of this class are cloneable with this method.

Usage

LearnerClassifAdaBoostM1$clone(deep = FALSE)

Arguments

deep

Whether to make a deep clone.
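
A brief illustration of cloning (object names chosen for the example): with deep = TRUE the copy is expected to carry its own parameter set, so modifying the copy should not affect the original learner.

learner = mlr3::lrn("classif.AdaBoostM1", I = 10)
learner_copy = learner$clone(deep = TRUE)

# Changing the copy leaves the original learner's parameters untouched
learner_copy$param_set$values$I = 50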

Examples

# Define the Learner
learner = mlr3::lrn("classif.AdaBoostM1")
print(learner)
#> 
#> ── <LearnerClassifAdaBoostM1> (classif.AdaBoostM1): Adaptive Boosting ──────────
#> • Model: -
#> • Parameters: list()
#> • Packages: mlr3, mlr3extralearners, and RWeka
#> • Predict Types: [response] and prob
#> • Feature Types: integer, numeric, factor, and ordered
#> • Encapsulation: none (fallback: -)
#> • Properties: multiclass and twoclass
#> • Other settings: use_weights = 'error'

# Define a Task
task = mlr3::tsk("sonar")

# Create train and test set
ids = mlr3::partition(task)

# Train the learner on the training ids
learner$train(task, row_ids = ids$train)

print(learner$model)
#> AdaBoostM1: Base classifiers and their weights: 
#> 
#> Decision Stump
#> 
#> Classifications
#> 
#> V11 <= 0.1791 : R
#> V11 > 0.1791 : M
#> V11 is missing : M
#> 
#> Class distributions
#> 
#> V11 <= 0.1791
#> M	R	
#> 0.14035087719298245	0.8596491228070176	
#> V11 > 0.1791
#> M	R	
#> 0.7560975609756098	0.24390243902439024	
#> V11 is missing
#> M	R	
#> 0.5035971223021583	0.49640287769784175	
#> 
#> 
#> Weight: 1.38
#> 
#> Decision Stump
#> 
#> Classifications
#> 
#> V45 <= 0.38545 : R
#> V45 > 0.38545 : M
#> V45 is missing : R
#> 
#> Class distributions
#> 
#> V45 <= 0.38545
#> M	R	
#> 0.34097313544993435	0.6590268645500656	
#> V45 > 0.38545
#> M	R	
#> 0.9059350503919367	0.0940649496080632	
#> V45 is missing
#> M	R	
#> 0.42213642213642216	0.5778635778635779	
#> 
#> 
#> Weight: 0.82
#> 
#> Decision Stump
#> 
#> Classifications
#> 
#> V36 <= 0.47809999999999997 : M
#> V36 > 0.47809999999999997 : R
#> V36 is missing : M
#> 
#> Class distributions
#> 
#> V36 <= 0.47809999999999997
#> M	R	
#> 0.7085800692121784	0.29141993078782163	
#> V36 > 0.47809999999999997
#> M	R	
#> 0.17205080837366665	0.8279491916263334	
#> V36 is missing
#> M	R	
#> 0.5715824246037109	0.42841757539628905	
#> 
#> 
#> Weight: 1.04
#> 
#> Decision Stump
#> 
#> Classifications
#> 
#> V23 <= 0.75415 : R
#> V23 > 0.75415 : M
#> V23 is missing : R
#> 
#> Class distributions
#> 
#> V23 <= 0.75415
#> M	R	
#> 0.3008668779973784	0.6991331220026216	
#> V23 > 0.75415
#> M	R	
#> 0.6998054080077787	0.3001945919922213	
#> V23 is missing
#> M	R	
#> 0.441153734757341	0.558846265242659	
#> 
#> 
#> Weight: 0.84
#> 
#> Decision Stump
#> 
#> Classifications
#> 
#> V45 <= 0.26365 : R
#> V45 > 0.26365 : M
#> V45 is missing : M
#> 
#> Class distributions
#> 
#> V45 <= 0.26365
#> M	R	
#> 0.3801611034877917	0.6198388965122082	
#> V45 > 0.26365
#> M	R	
#> 0.8813395239055687	0.1186604760944313	
#> V45 is missing
#> M	R	
#> 0.5003645060022495	0.4996354939977505	
#> 
#> 
#> Weight: 0.77
#> 
#> Decision Stump
#> 
#> Classifications
#> 
#> V13 <= 0.2112 : R
#> V13 > 0.2112 : M
#> V13 is missing : M
#> 
#> Class distributions
#> 
#> V13 <= 0.2112
#> M	R	
#> 0.31262446970539	0.68737553029461	
#> V13 > 0.2112
#> M	R	
#> 0.7402410660886382	0.25975893391136184	
#> V13 is missing
#> M	R	
#> 0.6100188341628751	0.389981165837125	
#> 
#> 
#> Weight: 0.97
#> 
#> Decision Stump
#> 
#> Classifications
#> 
#> V52 <= 0.0085 : R
#> V52 > 0.0085 : M
#> V52 is missing : M
#> 
#> Class distributions
#> 
#> V52 <= 0.0085
#> M	R	
#> 0.1895941323147028	0.8104058676852972	
#> V52 > 0.0085
#> M	R	
#> 0.6487780850991299	0.3512219149008701	
#> V52 is missing
#> M	R	
#> 0.5280247659809648	0.47197523401903513	
#> 
#> 
#> Weight: 0.81
#> 
#> Decision Stump
#> 
#> Classifications
#> 
#> V53 <= 0.00275 : M
#> V53 > 0.00275 : R
#> V53 is missing : R
#> 
#> Class distributions
#> 
#> V53 <= 0.00275
#> M	R	
#> 0.940489912747543	0.05951008725245708	
#> V53 > 0.00275
#> M	R	
#> 0.3666938618933086	0.6333061381066913	
#> V53 is missing
#> M	R	
#> 0.42660539854425195	0.5733946014557479	
#> 
#> 
#> Weight: 0.69
#> 
#> Decision Stump
#> 
#> Classifications
#> 
#> V49 <= 0.02315 : R
#> V49 > 0.02315 : M
#> V49 is missing : M
#> 
#> Class distributions
#> 
#> V49 <= 0.02315
#> M	R	
#> 0.24756071210964734	0.7524392878903526	
#> V49 > 0.02315
#> M	R	
#> 0.6585096306808258	0.3414903693191742	
#> V49 is missing
#> M	R	
#> 0.5645070995778106	0.43549290042218936	
#> 
#> 
#> Weight: 0.75
#> 
#> Decision Stump
#> 
#> Classifications
#> 
#> V4 <= 0.04915 : R
#> V4 > 0.04915 : M
#> V4 is missing : R
#> 
#> Class distributions
#> 
#> V4 <= 0.04915
#> M	R	
#> 0.33083180767928105	0.6691681923207189	
#> V4 > 0.04915
#> M	R	
#> 0.7010488853126525	0.29895111468734736	
#> V4 is missing
#> M	R	
#> 0.4619232089239741	0.5380767910760259	
#> 
#> 
#> Weight: 0.76
#> 
#> Number of performed Iterations: 10
#> 


# Make predictions for the test rows
predictions = learner$predict(task, row_ids = ids$test)

# Score the predictions
predictions$score()
#> classif.ce 
#>  0.2898551
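
As a follow-up sketch (the measure and predict type are chosen for illustration), other measures can be applied to the same predictions, and class probabilities are available by switching the predict type before predicting again:

# Score with classification accuracy instead of the default error rate
predictions$score(mlr3::msr("classif.acc"))

# Request class probabilities instead of hard labels
learner$predict_type = "prob"
predictions_prob = learner$predict(task, row_ids = ids$test)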