Commit f38413b

rename.
trivialfis committed Sep 22, 2024
1 parent e034fe1 commit f38413b
Showing 10 changed files with 24 additions and 27 deletions.
2 changes: 1 addition & 1 deletion doc/parameter.rst
@@ -245,7 +245,7 @@ Parameters for Non-Exact Tree Methods
trees. After 3.0, this parameter affects GPU algorithms as well.


- * ``external_memory_concat_pages``, [default = ``false``]
+ * ``extmem_concat_pages``, [default = ``false``]

This parameter is only used for the ``hist`` tree method with ``device=cuda`` and
``subsample != 1.0``. Before 3.0, pages were always concatenated.
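Note: after this rename the option is passed under its new key. A minimal sketch of a
training configuration using it (hypothetical; ``Xy`` is assumed to be an external-memory
DMatrix such as ``xgboost.ExtMemQuantileDMatrix``, built as in the tutorial change below):

    import xgboost as xgb

    # `Xy` is assumed to be an external-memory DMatrix built from a user-defined
    # xgboost.DataIter; see the external_memory.rst section below for that setup.
    params = {
        "tree_method": "hist",
        "device": "cuda",                     # the option only applies to GPU hist
        "subsample": 0.2,                     # must be != 1.0 for page concatenation
        "sampling_method": "gradient_based",
        "extmem_concat_pages": True,          # new name (was external_memory_concat_pages)
    }
    booster = xgb.train(params, Xy, num_boost_round=10)
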
10 changes: 5 additions & 5 deletions doc/tutorials/external_memory.rst
@@ -25,10 +25,10 @@ The external memory support has undergone multiple development iterations. Like
:py:class:`~xgboost.QuantileDMatrix` with :py:class:`~xgboost.DataIter`, XGBoost loads
data batch-by-batch using a custom iterator supplied by the user. However, unlike the
:py:class:`~xgboost.QuantileDMatrix`, external memory does not concatenate the batches
- (unless specified by the ``external_memory_concat_pages``) . Instead, it caches all
- batches in the external memory and fetch them on-demand. Go to the end of the document to
- see a comparison between :py:class:`~xgboost.QuantileDMatrix` and the external memory
- version of :py:class:`~xgboost.ExtMemQuantileDMatrix`.
+ (unless specified by the ``extmem_concat_pages``) . Instead, it caches all batches in the
+ external memory and fetch them on-demand. Go to the end of the document to see a
+ comparison between :py:class:`~xgboost.QuantileDMatrix` and the external memory version of
+ :py:class:`~xgboost.ExtMemQuantileDMatrix`.

**Contents**

@@ -182,7 +182,7 @@ concatenation can be enabled by:
param = {
"device": "cuda",
"external_memory_concat_pages": true,
"extmem_concat_pages": true,
'subsample': 0.2,
'sampling_method': 'gradient_based',
}
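Note: the tutorial hunk above only shows the parameter dictionary. A minimal sketch of the
external-memory setup it assumes, following the iterator pattern this tutorial describes (a
user-defined ``xgboost.DataIter`` feeding an ``xgboost.ExtMemQuantileDMatrix``, available in
XGBoost 3.0+); batch contents and the cache location are illustrative:

    import numpy as np
    import xgboost as xgb

    class BatchIter(xgb.DataIter):
        """Yield pre-split batches; XGBoost caches them externally and fetches on demand."""

        def __init__(self, batches):
            self._batches = batches  # list of (X, y) tuples
            self._it = 0
            super().__init__(cache_prefix="cache")  # on-disk cache location

        def next(self, input_data) -> bool:
            if self._it == len(self._batches):
                return False  # no more batches in this epoch
            X, y = self._batches[self._it]
            input_data(data=X, label=y)
            self._it += 1
            return True

        def reset(self) -> None:
            self._it = 0  # rewind for the next epoch

    rng = np.random.default_rng(0)
    batches = [(rng.normal(size=(256, 8)), rng.normal(size=256)) for _ in range(4)]
    Xy = xgb.ExtMemQuantileDMatrix(BatchIter(batches))
    # Pages stay separate unless `extmem_concat_pages` is enabled as in the hunk above.
    booster = xgb.train({"tree_method": "hist", "device": "cuda"}, Xy, num_boost_round=10)
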
4 changes: 2 additions & 2 deletions src/common/error_msg.h
@@ -108,8 +108,8 @@ inline auto NoCategorical(std::string name) {

inline void NoPageConcat(bool concat_pages) {
if (concat_pages) {
LOG(FATAL) << "`external_memory_concat_pages` must be false when there's no sampling or when "
"it's running on the CPU.";
LOG(FATAL) << "`extmem_concat_pages` must be false when there's no sampling or when it's "
"running on the CPU.";
}
}
} // namespace xgboost::error
4 changes: 2 additions & 2 deletions src/tree/hist/param.h
@@ -23,7 +23,7 @@ struct HistMakerTrainParam : public XGBoostParameter<HistMakerTrainParam> {
constexpr static std::size_t CudaDefaultNodes() { return static_cast<std::size_t>(1) << 12; }

bool debug_synchronize{false};
- bool external_memory_concat_pages{false};
+ bool extmem_concat_pages{false};

void CheckTreesSynchronized(Context const* ctx, RegTree const* local_tree) const;

@@ -43,7 +43,7 @@ struct HistMakerTrainParam : public XGBoostParameter<HistMakerTrainParam> {
.set_default(NotSet())
.set_lower_bound(1)
.describe("Maximum number of nodes in histogram cache.");
- DMLC_DECLARE_FIELD(external_memory_concat_pages).set_default(false);
+ DMLC_DECLARE_FIELD(extmem_concat_pages).set_default(false);
}
};
} // namespace xgboost::tree
2 changes: 1 addition & 1 deletion src/tree/updater_approx.cc
@@ -278,7 +278,7 @@ class GlobalApproxUpdater : public TreeUpdater {
*sampled = linalg::Empty<GradientPair>(ctx_, gpair->Size(), 1);
auto in = gpair->HostView().Values();
std::copy(in.data(), in.data() + in.size(), sampled->HostView().Values().data());
- error::NoPageConcat(this->hist_param_.external_memory_concat_pages);
+ error::NoPageConcat(this->hist_param_.extmem_concat_pages);
SampleGradient(ctx_, param, sampled->HostView());
}

2 changes: 1 addition & 1 deletion src/tree/updater_gpu_hist.cu
@@ -160,7 +160,7 @@ struct GPUHistMakerDevice {
interaction_constraints(param, static_cast<bst_feature_t>(info.num_col_)),
sampler{std::make_unique<GradientBasedSampler>(
ctx, info.num_row_, batch_param, param.subsample, param.sampling_method,
- batch_ptr_.size() > 2 && this->hist_param_->external_memory_concat_pages)} {
+ batch_ptr_.size() > 2 && this->hist_param_->extmem_concat_pages)} {
if (!param.monotone_constraints.empty()) {
// Copy assigning an empty vector causes an exception in MSVC debug builds
monotone_constraints = param.monotone_constraints;
2 changes: 1 addition & 1 deletion src/tree/updater_quantile_hist.cc
@@ -539,7 +539,7 @@ class QuantileHistMaker : public TreeUpdater {
// Copy gradient into buffer for sampling. This converts C-order to F-order.
std::copy(linalg::cbegin(h_gpair), linalg::cend(h_gpair), linalg::begin(h_sample_out));
}
- error::NoPageConcat(this->hist_param_.external_memory_concat_pages);
+ error::NoPageConcat(this->hist_param_.extmem_concat_pages);
SampleGradient(ctx_, *param, h_sample_out);
auto *h_out_position = &out_position[tree_it - trees.begin()];
if ((*tree_it)->IsMultiTarget()) {
2 changes: 1 addition & 1 deletion tests/cpp/tree/gpu_hist/test_gradient_based_sampler.cu
@@ -85,7 +85,7 @@ TEST(GradientBasedSampler, NoSamplingExternalMemory) {
[&] {
GradientBasedSampler sampler(&ctx, kRows, param, kSubsample, TrainParam::kUniform, true);
},
GMockThrow("external_memory_concat_pages"));
GMockThrow("extmem_concat_pages"));
}

TEST(GradientBasedSampler, UniformSampling) {
21 changes: 9 additions & 12 deletions tests/cpp/tree/test_gpu_hist.cu
@@ -189,7 +189,7 @@ TEST(GpuHist, ExternalMemoryWithSampling) {
RegTree tree_ext;
HostDeviceVector<bst_float> preds_ext(kRows, 0.0, ctx.Device());
UpdateTree(&ctx, &gpair, p_fmat_ext.get(), &tree_ext, &preds_ext, kSubsample, kSamplingMethod,
- kRows,true);
+ kRows, true);

Json jtree{Object{}};
Json jtree_ext{Object{}};
@@ -237,34 +237,31 @@ TEST(GpuHist, PageConcatConfig) {

auto learner = std::unique_ptr<Learner>(Learner::Create({p_fmat}));
learner->SetParam("device", ctx.DeviceName());
learner->SetParam("external_memory_concat_pages", "true");
learner->SetParam("extmem_concat_pages", "true");
learner->SetParam("subsample", "0.8");
learner->Configure();

learner->UpdateOneIter(0, p_fmat);
learner->SetParam("external_memory_concat_pages", "false");
learner->SetParam("extmem_concat_pages", "false");
learner->Configure();
// GPU Hist rebuilds the updater after configuration. Training continues
learner->UpdateOneIter(1, p_fmat);

learner->SetParam("external_memory_concat_pages", "true");
learner->SetParam("extmem_concat_pages", "true");
learner->SetParam("subsample", "1.0");
- ASSERT_THAT([&] { learner->UpdateOneIter(2, p_fmat); },
- GMockThrow("external_memory_concat_pages"));
+ ASSERT_THAT([&] { learner->UpdateOneIter(2, p_fmat); }, GMockThrow("extmem_concat_pages"));

// Throws error on CPU.
{
auto learner = std::unique_ptr<Learner>(Learner::Create({p_fmat}));
learner->SetParam("external_memory_concat_pages", "true");
ASSERT_THAT([&] { learner->UpdateOneIter(0, p_fmat); },
GMockThrow("external_memory_concat_pages"));
learner->SetParam("extmem_concat_pages", "true");
ASSERT_THAT([&] { learner->UpdateOneIter(0, p_fmat); }, GMockThrow("extmem_concat_pages"));
}
{
auto learner = std::unique_ptr<Learner>(Learner::Create({p_fmat}));
learner->SetParam("external_memory_concat_pages", "true");
learner->SetParam("extmem_concat_pages", "true");
learner->SetParam("tree_method", "approx");
- ASSERT_THAT([&] { learner->UpdateOneIter(0, p_fmat); },
- GMockThrow("external_memory_concat_pages"));
+ ASSERT_THAT([&] { learner->UpdateOneIter(0, p_fmat); }, GMockThrow("extmem_concat_pages"));
}
}

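Note: the C++ test above pins down the user-visible contract of the renamed flag. A hedged
Python sketch of the same expectations (assuming an XGBoost build that includes this rename,
and that the C++ check surfaces as an ``XGBoostError`` at training time; the small in-memory
arrays are illustrative only):

    import numpy as np
    import pytest
    import xgboost as xgb

    rng = np.random.default_rng(0)
    Xy = xgb.DMatrix(rng.normal(size=(64, 4)), rng.normal(size=64))

    # Rejected on the CPU, mirroring the "Throws error on CPU" block above.
    with pytest.raises(xgb.core.XGBoostError, match="extmem_concat_pages"):
        xgb.train({"tree_method": "hist", "extmem_concat_pages": True}, Xy, num_boost_round=1)

    # Rejected for the approx tree method as well, mirroring the last block of the test.
    with pytest.raises(xgb.core.XGBoostError, match="extmem_concat_pages"):
        xgb.train({"tree_method": "approx", "extmem_concat_pages": True}, Xy, num_boost_round=1)
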
2 changes: 1 addition & 1 deletion tests/python-gpu/test_gpu_data_iterator.py
@@ -83,7 +83,7 @@ def test_concat_pages() -> None:
"device": "cuda",
"subsample": 0.5,
"sampling_method": "gradient_based",
"external_memory_concat_pages": True,
"extmem_concat_pages": True,
"objective": "reg:absoluteerror",
},
Xy,
