avoid warnings and prints
david-cortes committed Dec 9, 2024
1 parent 94c6714 commit 245f222
Showing 7 changed files with 24 additions and 16 deletions.
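The recurring fix below is to silence training output. A minimal sketch of the two mechanisms involved, using the bundled agaricus data (the parameter values here are illustrative, not taken from the diff):

    library(xgboost)

    data(agaricus.train, package = "xgboost")
    dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)

    # Per-call silencing: verbose = 0 suppresses the per-iteration metric
    # printout that xgb.train() produces whenever `evals` is supplied.
    bst <- xgb.train(
      list(objective = "binary:logistic", nthread = 1),
      dtrain, nrounds = 2,
      evals = list(train = dtrain),
      verbose = 0
    )

    # Global silencing: messages emitted by the native library are governed
    # by the global verbosity level instead of the per-call argument.
    xgb.set.config(verbosity = 0)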
7 changes: 4 additions & 3 deletions R-package/tests/testthat/test_basic.R
@@ -249,7 +249,8 @@ test_that("train and predict RF", {
       objective = "binary:logistic", eval_metric = "error",
       num_parallel_tree = 20, subsample = 0.6, colsample_bytree = 0.1
     ),
-    evals = list(train = xgb.DMatrix(train$data, label = lb))
+    evals = list(train = xgb.DMatrix(train$data, label = lb)),
+    verbose = 0
   )
   expect_equal(xgb.get.num.boosted.rounds(bst), 1)

@@ -488,9 +489,9 @@ test_that("max_delta_step works", {
   )
   nrounds <- 5
   # model with no restriction on max_delta_step
-  bst1 <- xgb.train(params, dtrain, nrounds, evals = evals, verbose = 1)
+  bst1 <- xgb.train(params, dtrain, nrounds, evals = evals, verbose = 0)
   # model with restricted max_delta_step
-  bst2 <- xgb.train(c(params, list(max_delta_step = 1)), dtrain, nrounds, evals = evals, verbose = 1)
+  bst2 <- xgb.train(c(params, list(max_delta_step = 1)), dtrain, nrounds, evals = evals, verbose = 0)
   # the no-restriction model is expected to have consistently lower loss during the initial iterations
   expect_true(all(attributes(bst1)$evaluation_log$train_logloss < attributes(bst2)$evaluation_log$train_logloss))
   expect_lt(mean(attributes(bst1)$evaluation_log$train_logloss) / mean(attributes(bst2)$evaluation_log$train_logloss), 0.8)
3 changes: 2 additions & 1 deletion R-package/tests/testthat/test_callbacks.R
@@ -277,7 +277,7 @@ test_that("early stopping xgb.train works", {
   )
   expect_equal(attributes(bst)$evaluation_log, attributes(bst0)$evaluation_log)

-  fname <- file.path(tempdir(), "model.bin")
+  fname <- file.path(tempdir(), "model.ubj")
   xgb.save(bst, fname)
   loaded <- xgb.load(fname)

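The extension change matters because xgb.save() picks the serialization format from the file name; the legacy binary format behind ".bin" is deprecated and emits a warning, while ".ubj" (Universal Binary JSON) is a currently supported format. A brief sketch, assuming `bst` is a trained booster:

    fname <- file.path(tempdir(), "model.ubj")  # UBJSON: no deprecation warning
    xgb.save(bst, fname)
    loaded <- xgb.load(fname)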
@@ -335,6 +335,7 @@ test_that("early stopping works with titanic", {
     ),
     nrounds = 100,
     early_stopping_rounds = 3,
+    verbose = 0,
     evals = list(train = xgb.DMatrix(dtx, label = dty))
   )

10 changes: 6 additions & 4 deletions R-package/tests/testthat/test_custom_objective.R
@@ -33,7 +33,7 @@ param <- list(max_depth = 2, eta = 1, nthread = n_threads,
 num_round <- 2

 test_that("custom objective works", {
-  bst <- xgb.train(param, dtrain, num_round, evals)
+  bst <- xgb.train(param, dtrain, num_round, evals, verbose = 0)
   expect_equal(class(bst), "xgb.Booster")
   expect_false(is.null(attributes(bst)$evaluation_log))
   expect_false(is.null(attributes(bst)$evaluation_log$eval_error))
@@ -48,7 +48,7 @@ test_that("custom objective in CV works", {
 })

 test_that("custom objective with early stop works", {
-  bst <- xgb.train(param, dtrain, 10, evals)
+  bst <- xgb.train(param, dtrain, 10, evals, verbose = 0)
   expect_equal(class(bst), "xgb.Booster")
   train_log <- attributes(bst)$evaluation_log$train_error
   expect_true(all(diff(train_log) <= 0))
@@ -66,7 +66,7 @@ test_that("custom objective using DMatrix attr works", {
     return(list(grad = grad, hess = hess))
   }
   param$objective <- logregobjattr
-  bst <- xgb.train(param, dtrain, num_round, evals)
+  bst <- xgb.train(param, dtrain, num_round, evals, verbose = 0)
   expect_equal(class(bst), "xgb.Booster")
 })

@@ -89,7 +89,9 @@ test_that("custom objective with multi-class shape", {
   }
   param$objective <- fake_softprob
   param$eval_metric <- fake_merror
-  bst <- xgb.train(c(param, list(num_class = n_classes)), dtrain, 1)
+  expect_warning({
+    bst <- xgb.train(c(param, list(num_class = n_classes)), dtrain, nrounds = 1)
+  })
 })

 softmax <- function(values) {
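Here the training call is expected to raise a warning, so the test asserts it explicitly instead of letting it leak into the console output. A minimal testthat sketch with an illustrative warning:

    library(testthat)

    expect_warning({
      x <- as.numeric("not a number")  # warns: NAs introduced by coercion
    })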
7 changes: 4 additions & 3 deletions R-package/tests/testthat/test_dmatrix.R
@@ -41,13 +41,13 @@ test_that("xgb.DMatrix: basic construction", {

   params <- list(tree_method = "hist", nthread = n_threads)
   bst_fd <- xgb.train(
-    params, nrounds = 8, fd, evals = list(train = fd)
+    params, nrounds = 8, fd, evals = list(train = fd), verbose = 0
   )
   bst_dgr <- xgb.train(
-    params, nrounds = 8, fdgr, evals = list(train = fdgr)
+    params, nrounds = 8, fdgr, evals = list(train = fdgr), verbose = 0
   )
   bst_dgc <- xgb.train(
-    params, nrounds = 8, fdgc, evals = list(train = fdgc)
+    params, nrounds = 8, fdgc, evals = list(train = fdgc), verbose = 0
   )

   raw_fd <- xgb.save.raw(bst_fd, raw_format = "ubj")
@@ -130,6 +130,7 @@ test_that("xgb.DMatrix: saving, loading", {
   expect_equal(length(cnames), 126)
   tmp_file <- tempfile('xgb.DMatrix_')
   xgb.DMatrix.save(dtrain, tmp_file)
+  xgb.set.config(verbosity = 0)
   dtrain <- xgb.DMatrix(tmp_file)
   expect_equal(colnames(dtrain), cnames)

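The added xgb.set.config() call is needed because the message printed when a DMatrix is read back from disk comes from the native library, which the per-call `verbose` argument does not reach. A sketch; saving and restoring the previous level is an illustrative nicety, not part of the diff:

    old_verbosity <- xgb.get.config()$verbosity  # remember the global level
    xgb.set.config(verbosity = 0)                # 0 = silent, up to 3 = debug
    dtrain <- xgb.DMatrix(tmp_file)              # no loading message printed
    xgb.set.config(verbosity = old_verbosity)    # restore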
8 changes: 5 additions & 3 deletions R-package/tests/testthat/test_glm.R
@@ -61,7 +61,7 @@ test_that("gblinear early stopping works", {
     agaricus.test$data, label = agaricus.test$label, nthread = n_threads
   )

-  param <- list(
+  param <- xgb.params(
     objective = "binary:logistic", eval_metric = "error", booster = "gblinear",
     nthread = n_threads, eta = 0.8, alpha = 0.0001, lambda = 0.0001,
     updater = "coord_descent"
@@ -70,14 +70,16 @@
   es_round <- 1
   n <- 10
   booster <- xgb.train(
-    param, dtrain, n, list(eval = dtest, train = dtrain), early_stopping_rounds = es_round
+    param, dtrain, nrounds = n, evals = list(eval = dtest, train = dtrain),
+    early_stopping_rounds = es_round, verbose = 0
   )
   expect_equal(xgb.attr(booster, "best_iteration"), 4)
   predt_es <- predict(booster, dtrain)

   n <- xgb.attr(booster, "best_iteration") + es_round + 1
   booster <- xgb.train(
-    param, dtrain, n, list(eval = dtest, train = dtrain), early_stopping_rounds = es_round
+    param, dtrain, nrounds = n, evals = list(eval = dtest, train = dtrain),
+    early_stopping_rounds = es_round, verbose = 0
   )
   predt <- predict(booster, dtrain)
   expect_equal(predt_es, predt)
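Swapping list() for xgb.params() yields the same named list of training parameters, but goes through a package constructor whose documented arguments can catch misspelled or unrecognized names at build time rather than as warnings during training. A minimal sketch with values echoing the test:

    param <- xgb.params(
      objective = "binary:logistic",
      booster = "gblinear",
      eta = 0.8,
      lambda = 0.0001
    )
    stopifnot(is.list(param))  # drop-in replacement for the plain list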
1 change: 1 addition & 0 deletions R-package/tests/testthat/test_helpers.R
@@ -464,6 +464,7 @@ test_that("xgb.plot.multi.trees works with and without feature names", {
   .skip_if_vcd_not_available()
   xgb.plot.multi.trees(model = bst.Tree.unnamed, features_keep = 3)
   xgb.plot.multi.trees(model = bst.Tree, features_keep = 3)
+  expect_true(TRUE)
 })

 test_that("xgb.plot.deepness works", {
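The lone expect_true(TRUE) is there because testthat reports a test_that() block that makes no expectations as empty; it turns a "runs without error" smoke test into a passing test. A sketch of the idiom, with a stand-in body:

    library(testthat)

    test_that("plot helpers run without error", {
      x <- cumsum(1:10)  # stand-in for the xgb.plot.multi.trees() calls
      expect_true(TRUE)  # gives the smoke test an explicit expectation
    })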
4 changes: 2 additions & 2 deletions R-package/tests/testthat/test_ranking.R
@@ -15,7 +15,7 @@ test_that('Test ranking with unweighted data', {

   params <- list(eta = 1, tree_method = 'exact', objective = 'rank:pairwise', max_depth = 1,
                  eval_metric = 'auc', eval_metric = 'aucpr', nthread = n_threads)
-  bst <- xgb.train(params, dtrain, nrounds = 10, evals = list(train = dtrain))
+  bst <- xgb.train(params, dtrain, nrounds = 10, evals = list(train = dtrain), verbose = 0)
   # Check if the metric is monotone increasing
   expect_true(all(diff(attributes(bst)$evaluation_log$train_auc) >= 0))
   expect_true(all(diff(attributes(bst)$evaluation_log$train_aucpr) >= 0))
@@ -39,7 +39,7 @@ test_that('Test ranking with weighted data', {
     eta = 1, tree_method = "exact", objective = "rank:pairwise", max_depth = 1,
     eval_metric = "auc", eval_metric = "aucpr", nthread = n_threads
   )
-  bst <- xgb.train(params, dtrain, nrounds = 10, evals = list(train = dtrain))
+  bst <- xgb.train(params, dtrain, nrounds = 10, evals = list(train = dtrain), verbose = 0)
   # Check if the metric is monotone increasing
   expect_true(all(diff(attributes(bst)$evaluation_log$train_auc) >= 0))
   expect_true(all(diff(attributes(bst)$evaluation_log$train_aucpr) >= 0))
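These assertions read the training history through the booster's attributes, where this interface keeps the evaluation log. A short sketch, assuming `bst` was trained with `evals` and the two AUC metrics as above:

    log <- attributes(bst)$evaluation_log     # data.table, one row per round
    stopifnot(all(diff(log$train_auc) >= 0))  # train AUC should not decrease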
