ranger: Multiclass Classification

library(mlexperiments)
library(mllrnrs)

See https://github.com/kapsner/mllrnrs/blob/main/R/learner_ranger.R for implementation details.

Preprocessing

Import and Prepare Data

library(mlbench)
data("DNA")
dataset <- DNA |>
  data.table::as.data.table() |>
  na.omit()

# use a subset (sequence positions 160 to 180) of the 180 feature columns
feature_cols <- colnames(dataset)[160:180]
target_col <- "Class"
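
The target of the DNA dataset has the three classes ei, ie, and n. A minimal sketch to inspect the class distribution before splitting:

# absolute frequencies of the three target classes
table(dataset[[target_col]])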

General Configurations

seed <- 123
if (isTRUE(as.logical(Sys.getenv("_R_CHECK_LIMIT_CORES_")))) {
  # on CRAN, checks are limited to at most 2 cores
  ncores <- 2L
} else {
  ncores <- ifelse(
    test = parallel::detectCores() > 4,
    yes = 4L,
    no = ifelse(
      test = parallel::detectCores() < 2L,
      yes = 1L,
      no = parallel::detectCores()
    )
  )
}
options("mlexperiments.bayesian.max_init" = 10L)

Generate Training- and Test Data

data_split <- splitTools::partition(
  y = dataset[, get(target_col)],
  p = c(train = 0.7, test = 0.3),
  type = "stratified",
  seed = seed
)

train_x <- model.matrix(
  ~ -1 + .,
  dataset[data_split$train, .SD, .SDcols = feature_cols]
)
train_y <- dataset[data_split$train, get(target_col)]


test_x <- model.matrix(
  ~ -1 + .,
  dataset[data_split$test, .SD, .SDcols = feature_cols]
)
test_y <- dataset[data_split$test, get(target_col)]
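
Since the partitioning is stratified on the target, the class proportions of the training and test data should roughly match; a minimal sketch to verify this:

# class proportions should be (approximately) equal in both partitions
prop.table(table(train_y))
prop.table(table(test_y))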

Generate Training Data Folds

fold_list <- splitTools::create_folds(
  y = train_y,
  k = 3,
  type = "stratified",
  seed = seed
)
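
The returned fold_list is a named list with one element per fold, each holding the row indices of the respective training data. A minimal sketch to inspect the fold sizes:

# number of training indices per fold
lengths(fold_list)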

Experiments

Prepare Experiments

# required learner arguments, not optimized: grow probability trees
# for multiclass classification
learner_args <- list(probability = TRUE, classification = TRUE)

# set arguments for predict function and performance metric,
# required for mlexperiments::MLCrossValidation and
# mlexperiments::MLNestedCV
predict_args <- list(reshape = TRUE)
performance_metric <- metric("bacc")  # balanced accuracy
performance_metric_args <- NULL
return_models <- FALSE

# required for grid search and initialization of bayesian optimization
# (2 x 3 x 3 x 3 x 2 = 108 combinations in total)
parameter_grid <- expand.grid(
  num.trees = seq(500, 1000, 500),
  mtry = seq(2, 6, 2),
  min.node.size = seq(1, 9, 4),
  max.depth = seq(1, 9, 4),
  sample.fraction = seq(0.5, 0.8, 0.3)
)
# reduce to a maximum of 10 rows
if (nrow(parameter_grid) > 10) {
  set.seed(123)
  sample_rows <- sample(seq_len(nrow(parameter_grid)), 10, FALSE)
  parameter_grid <- kdry::mlh_subset(parameter_grid, sample_rows)
}

# required for bayesian optimization
parameter_bounds <- list(
  num.trees = c(100L, 1000L),
  mtry = c(2L, 9L),
  min.node.size = c(1L, 20L),
  max.depth = c(1L, 40L),
  sample.fraction = c(0.3, 1.)
)
optim_args <- list(
  iters.n = ncores, # number of bayesian optimization iterations
  kappa = 3.5, # exploration-exploitation trade-off of the ucb criterion
  acq = "ucb" # acquisition function: upper confidence bound
)

Hyperparameter Tuning

tuner <- mlexperiments::MLTuneParameters$new(
  learner = mllrnrs::LearnerRanger$new(),
  strategy = "grid",
  ncores = ncores,
  seed = seed
)

tuner$parameter_grid <- parameter_grid
tuner$learner_args <- learner_args
tuner$split_type <- "stratified"

tuner$set_data(
  x = train_x,
  y = train_y
)

tuner_results_grid <- tuner$execute(k = 3)
#> 
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [==================>-----------------------------------------------------------------------------] 2/10 ( 20%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [============================>-------------------------------------------------------------------] 3/10 ( 30%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [=====================================>----------------------------------------------------------] 4/10 ( 40%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [===============================================>------------------------------------------------] 5/10 ( 50%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [=========================================================>--------------------------------------] 6/10 ( 60%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [==================================================================>-----------------------------] 7/10 ( 70%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [============================================================================>-------------------] 8/10 ( 80%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [=====================================================================================>----------] 9/10 ( 90%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [===============================================================================================] 10/10 (100%)                                                                                                                                  
#>  Classification: using 'classification error rate' as optimization metric.

head(tuner_results_grid)
#>    setting_id metric_optim_mean num.trees mtry min.node.size max.depth sample.fraction probability classification
#> 1:          1         0.4786887       500    2             9         5             0.5        TRUE           TRUE
#> 2:          2         0.4791386       500    2             5         5             0.8        TRUE           TRUE
#> 3:          3         0.4419159       500    4             9         9             0.5        TRUE           TRUE
#> 4:          4         0.4809325      1000    2             9         1             0.5        TRUE           TRUE
#> 5:          5         0.4809325       500    2             9         1             0.8        TRUE           TRUE
#> 6:          6         0.4329589      1000    6             1         9             0.5        TRUE           TRUE
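
The column metric_optim_mean holds the cross-validated optimization metric (here: the classification error rate), so the best grid setting is the one minimizing it. A minimal sketch to extract it from the results table:

# setting with the lowest cross-validated error rate
tuner_results_grid[which.min(metric_optim_mean), ]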

Bayesian Optimization

tuner <- mlexperiments::MLTuneParameters$new(
  learner = mllrnrs::LearnerRanger$new(),
  strategy = "bayesian",
  ncores = ncores,
  seed = seed
)

tuner$parameter_grid <- parameter_grid
tuner$parameter_bounds <- parameter_bounds

tuner$learner_args <- learner_args
tuner$optim_args <- optim_args

tuner$split_type <- "stratified"

tuner$set_data(
  x = train_x,
  y = train_y
)

tuner_results_bayesian <- tuner$execute(k = 3)
#> 
#> Registering parallel backend using 4 cores.

head(tuner_results_bayesian)
#>    Epoch setting_id num.trees mtry min.node.size max.depth sample.fraction gpUtility acqOptimum inBounds Elapsed      Score
#> 1:     0          1       500    2             9         5             0.5        NA      FALSE     TRUE   1.597 -0.4791386
#> 2:     0          2       500    2             5         5             0.8        NA      FALSE     TRUE   1.641 -0.4786887
#> 3:     0          3       500    4             9         9             0.5        NA      FALSE     TRUE   2.161 -0.4392295
#> 4:     0          4      1000    2             9         1             0.5        NA      FALSE     TRUE   1.635 -0.4809325
#> 5:     0          5       500    2             9         1             0.8        NA      FALSE     TRUE   0.416 -0.4809325
#> 6:     0          6      1000    6             1         9             0.5        NA      FALSE     TRUE   3.373 -0.4378800
#>    metric_optim_mean errorMessage probability classification
#> 1:         0.4791386           NA        TRUE           TRUE
#> 2:         0.4786887           NA        TRUE           TRUE
#> 3:         0.4392295           NA        TRUE           TRUE
#> 4:         0.4809325           NA        TRUE           TRUE
#> 5:         0.4809325           NA        TRUE           TRUE
#> 6:         0.4378800           NA        TRUE           TRUE
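
Independent of the tuning strategy, the optimized hyperparameters are also stored in tuner$results$best.setting. Its first element is the setting_id, which must be dropped before the remaining elements can be re-used as learner arguments (as done in the cross-validation below); a minimal sketch:

# drop the first element (the setting id) to obtain valid learner arguments
best_setting <- tuner$results$best.setting[-1]
str(best_setting)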

k-Fold Cross Validation

validator <- mlexperiments::MLCrossValidation$new(
  learner = mllrnrs::LearnerRanger$new(),
  fold_list = fold_list,
  ncores = ncores,
  seed = seed
)

validator$learner_args <- tuner$results$best.setting[-1]

validator$predict_args <- predict_args
validator$performance_metric <- performance_metric
validator$performance_metric_args <- performance_metric_args
validator$return_models <- return_models

validator$set_data(
  x = train_x,
  y = train_y
)

validator_results <- validator$execute()
#> 
#> CV fold: Fold1
#> 
#> CV fold: Fold2
#> 
#> CV fold: Fold3
#> CV progress [========================================================================================================] 3/3 (100%)

head(validator_results)
#>     fold performance num.trees mtry min.node.size max.depth sample.fraction probability classification
#> 1: Fold1   0.4206685       500    4             9         9             0.8        TRUE           TRUE
#> 2: Fold2   0.4011889       500    4             9         9             0.8        TRUE           TRUE
#> 3: Fold3   0.4252033       500    4             9         9             0.8        TRUE           TRUE
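
A common summary of the fold-wise results is the mean performance across folds. A minimal sketch, which also keeps a copy of the unweighted results for the comparison in Appendix II:

# keep a copy and average the balanced accuracy across the three folds
validator_results_unweighted <- data.table::copy(validator_results)
mean(validator_results_unweighted$performance)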

Nested Cross Validation

validator <- mlexperiments::MLNestedCV$new(
  learner = mllrnrs::LearnerRanger$new(),
  strategy = "grid",
  fold_list = fold_list,
  k_tuning = 3L,
  ncores = ncores,
  seed = seed
)

validator$parameter_grid <- parameter_grid
validator$learner_args <- learner_args
validator$split_type <- "stratified"

validator$predict_args <- predict_args
validator$performance_metric <- performance_metric
validator$performance_metric_args <- performance_metric_args
validator$return_models <- return_models

validator$set_data(
  x = train_x,
  y = train_y
)

validator_results <- validator$execute()
#> 
#> CV fold: Fold1
#> 
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [============================>-------------------------------------------------------------------] 3/10 ( 30%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [=====================================>----------------------------------------------------------] 4/10 ( 40%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [===============================================>------------------------------------------------] 5/10 ( 50%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [=========================================================>--------------------------------------] 6/10 ( 60%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [==================================================================>-----------------------------] 7/10 ( 70%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [============================================================================>-------------------] 8/10 ( 80%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [=====================================================================================>----------] 9/10 ( 90%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [===============================================================================================] 10/10 (100%)                                                                                                                                  
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> CV fold: Fold2
#> CV progress [====================================================================>-----------------------------------] 2/3 ( 67%)
#> 
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [============================>-------------------------------------------------------------------] 3/10 ( 30%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [=====================================>----------------------------------------------------------] 4/10 ( 40%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [===============================================>------------------------------------------------] 5/10 ( 50%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [=========================================================>--------------------------------------] 6/10 ( 60%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [==================================================================>-----------------------------] 7/10 ( 70%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [============================================================================>-------------------] 8/10 ( 80%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [=====================================================================================>----------] 9/10 ( 90%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [===============================================================================================] 10/10 (100%)                                                                                                                                  
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> CV fold: Fold3
#> CV progress [========================================================================================================] 3/3 (100%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [============================>-------------------------------------------------------------------] 3/10 ( 30%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [=====================================>----------------------------------------------------------] 4/10 ( 40%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [===============================================>------------------------------------------------] 5/10 ( 50%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [=========================================================>--------------------------------------] 6/10 ( 60%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [==================================================================>-----------------------------] 7/10 ( 70%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [============================================================================>-------------------] 8/10 ( 80%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [=====================================================================================>----------] 9/10 ( 90%)
#>  Classification: using 'classification error rate' as optimization metric.
#> 
#> Parameter settings [===============================================================================================] 10/10 (100%)                                                                                                                                  
#>  Classification: using 'classification error rate' as optimization metric.

head(validator_results)
#>     fold performance num.trees mtry min.node.size max.depth sample.fraction probability classification
#> 1: Fold1   0.4505456      1000    6             1         9             0.5        TRUE           TRUE
#> 2: Fold2   0.4162822      1000    6             1         9             0.5        TRUE           TRUE
#> 3: Fold3   0.4508978      1000    6             1         9             0.5        TRUE           TRUE
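
Since the hyperparameters are re-tuned inside each outer fold, it is worth checking whether the inner grid search selected consistent settings; in this run, all three outer folds agreed on the same configuration. A minimal sketch:

# distinct hyperparameter settings selected across the outer folds
unique(validator_results[
  , .(num.trees, mtry, min.node.size, max.depth, sample.fraction)
])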

Inner Bayesian Optimization

validator <- mlexperiments::MLNestedCV$new(
  learner = mllrnrs::LearnerRanger$new(),
  strategy = "bayesian",
  fold_list = fold_list,
  k_tuning = 3L,
  ncores = ncores,
  seed = 312
)

validator$parameter_grid <- parameter_grid
validator$learner_args <- learner_args
validator$split_type <- "stratified"


validator$parameter_bounds <- parameter_bounds
validator$optim_args <- optim_args

validator$predict_args <- predict_args
validator$performance_metric <- performance_metric
validator$performance_metric_args <- performance_metric_args
# keep the fold models: they are needed below to compute predictions
# on the holdout test dataset
validator$return_models <- TRUE

validator$set_data(
  x = train_x,
  y = train_y
)

validator_results <- validator$execute()
#> 
#> CV fold: Fold1
#> 
#> Registering parallel backend using 4 cores.
#> 
#> CV fold: Fold2
#> CV progress [====================================================================>-----------------------------------] 2/3 ( 67%)
#> 
#> Registering parallel backend using 4 cores.
#> 
#> CV fold: Fold3
#> CV progress [========================================================================================================] 3/3 (100%)
#> Registering parallel backend using 4 cores.

head(validator_results)
#>     fold performance num.trees mtry min.node.size max.depth sample.fraction probability classification
#> 1: Fold1   0.4470914      1000    6             1         9       0.5000000        TRUE           TRUE
#> 2: Fold2   0.4419416       636    6             2        12       0.9378338        TRUE           TRUE
#> 3: Fold3   0.4737314       388    6             5        14       0.7457303        TRUE           TRUE

Holdout Test Dataset Performance

Predict Outcome in Holdout Test Dataset

preds_ranger <- mlexperiments::predictions(
  object = validator,
  newdata = test_x
)

Evaluate Performance on Holdout Test Dataset

perf_ranger <- mlexperiments::performance(
  object = validator,
  prediction_results = preds_ranger,
  y_ground_truth = test_y
)
perf_ranger
#>    model performance
#> 1: Fold1   0.4466305
#> 2: Fold2   0.4601201
#> 3: Fold3   0.4742046
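
Comparing the holdout performance with the out-of-fold estimates from the nested cross-validation above gives a rough impression of how well the fold models generalize; a minimal sketch:

# mean balanced accuracy on the holdout data vs. the out-of-fold estimate
mean(perf_ranger$performance)
mean(validator_results$performance)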

Appendix I: Grid Search with Target Weights

Here, ranger's case.weights argument is used to re-weight the individual observations during training.

# define the target weights
y_weights <- ifelse(train_y == "n", 0.8, ifelse(train_y == "ei", 1.2, 1))
head(y_weights)
#> [1] 1.2 1.2 1.0 0.8 0.8 1.0
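
A minimal sketch to verify how many observations received which weight:

# frequencies of the assigned case weights
table(y_weights)
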
tuner_w_weights <- mlexperiments::MLTuneParameters$new(
  learner = mllrnrs::LearnerRanger$new(),
  strategy = "grid",
  ncores = ncores,
  seed = seed
)

tuner_w_weights$parameter_grid <- parameter_grid
tuner_w_weights$learner_args <- c(
  learner_args,
  list(case_weights = y_weights)
)
tuner_w_weights$split_type <- "stratified"

tuner_w_weights$set_data(
  x = train_x,
  y = train_y
)

tuner_results_grid <- tuner_w_weights$execute(k = 3)
#> 
#> Parameter settings [============================>-------------------------------------------------------------------] 3/10 ( 30%)
#> Parameter settings [=====================================>----------------------------------------------------------] 4/10 ( 40%)
#> Parameter settings [===============================================>------------------------------------------------] 5/10 ( 50%)
#> Parameter settings [=========================================================>--------------------------------------] 6/10 ( 60%)
#> Parameter settings [==================================================================>-----------------------------] 7/10 ( 70%)
#> Parameter settings [============================================================================>-------------------] 8/10 ( 80%)
#> Parameter settings [=====================================================================================>----------] 9/10 ( 90%)
#> Parameter settings [===============================================================================================] 10/10 (100%)                                                                                                                                  

head(tuner_results_grid)
#>    setting_id metric_optim_mean num.trees  mtry min.node.size max.depth sample.fraction probability classification
#>         <int>             <num>     <num> <num>         <num>     <num>           <num>      <lgcl>         <lgcl>
#> 1:          1         0.4665865       500     2             9         5             0.5        TRUE           TRUE
#> 2:          2         0.4656941       500     2             5         5             0.8        TRUE           TRUE
#> 3:          3         0.4486635       500     4             9         9             0.5        TRUE           TRUE
#> 4:          4         0.4809325      1000     2             9         1             0.5        TRUE           TRUE
#> 5:          5         0.4809325       500     2             9         1             0.8        TRUE           TRUE
#> 6:          6         0.4544915      1000     6             1         9             0.5        TRUE           TRUE

Appendix II: k-Fold Cross Validation with Target Weights

validator <- mlexperiments::MLCrossValidation$new(
  learner = mllrnrs::LearnerRanger$new(),
  fold_list = fold_list,
  ncores = ncores,
  seed = seed
)

# combine the optimized setting from above with the newly created weights
validator$learner_args <- c(
  tuner$results$best.setting[-1],
  list("case_weights" = y_weights)
)

validator$predict_args <- predict_args
validator$performance_metric <- performance_metric
validator$performance_metric_args <- performance_metric_args
validator$return_models <- return_models

validator$set_data(
  x = train_x,
  y = train_y
)

validator_results <- validator$execute()
#> 
#> CV fold: Fold1
#> 
#> CV fold: Fold2
#> 
#> CV fold: Fold3
#> CV progress [========================================================================================================] 3/3 (100%)

head(validator_results)
#>      fold performance num.trees  mtry min.node.size max.depth sample.fraction probability classification
#>    <char>       <num>     <num> <num>         <num>     <num>           <num>      <lgcl>         <lgcl>
#> 1:  Fold1   0.4565664       500     4             9         9             0.8        TRUE           TRUE
#> 2:  Fold2   0.4266609       500     4             9         9             0.8        TRUE           TRUE
#> 3:  Fold3   0.4576471       500     4             9         9             0.8        TRUE           TRUE
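
Finally, the weighted results can be compared with the unweighted cross-validation from above; a minimal sketch, using the copy validator_results_unweighted kept earlier:

# mean balanced accuracy with vs. without case weights
mean(validator_results$performance)
mean(validator_results_unweighted$performance)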