-
-
Notifications
You must be signed in to change notification settings - Fork 8.7k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
SYCL. Unify calculations for objectives reg:absoluteerror, reg:quantileerror, binary:hinge #10993
Changes from 9 commits
d22460f
8b740b4
ce5603d
6870b7f
2c73ac7
e455de6
58828d9
b6453f0
896262f
ee366c5
a1d8745
e9690ab
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -49,6 +49,7 @@ void ElementWiseKernelHost(linalg::TensorView<T, D> t, std::int32_t n_threads, F | |
} | ||
|
||
#if !defined(XGBOOST_USE_CUDA) | ||
#if !defined(XGBOOST_USE_SYCL) | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. done |
||
template <typename T, int32_t D, typename Fn> | ||
void ElementWiseKernelDevice(linalg::TensorView<T, D>, Fn&&, void* = nullptr) { | ||
common::AssertGPUSupport(); | ||
|
@@ -66,6 +67,7 @@ void ElementWiseKernel(Context const* ctx, linalg::TensorView<T, D> t, Fn&& fn) | |
} | ||
ElementWiseKernelHost(t, ctx->Threads(), fn); | ||
} | ||
#endif // !defined(XGBOOST_USE_SYCL) | ||
#endif // !defined(XGBOOST_USE_CUDA) | ||
|
||
template <typename T, std::int32_t kDim> | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -45,7 +45,7 @@ namespace { | |
template <typename Fn> | ||
PackedReduceResult Reduce(Context const* ctx, MetaInfo const& info, Fn&& loss) { | ||
PackedReduceResult result; | ||
auto labels = info.labels.View(ctx->Device()); | ||
auto labels = info.labels.View(ctx->Device().IsSycl() ? DeviceOrd::CPU() : ctx->Device()); | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I assume you want to use There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. No. This function doesn't have a SYCL-specific implementation yet. So for proper execution on |
||
if (ctx->IsCUDA()) { | ||
#if defined(XGBOOST_USE_CUDA) | ||
thrust::counting_iterator<size_t> begin(0); | ||
|
@@ -182,10 +182,11 @@ class PseudoErrorLoss : public MetricNoCache { | |
|
||
double Eval(const HostDeviceVector<bst_float>& preds, const MetaInfo& info) override { | ||
CHECK_EQ(info.labels.Shape(0), info.num_row_); | ||
auto labels = info.labels.View(ctx_->Device()); | ||
preds.SetDevice(ctx_->Device()); | ||
auto device = ctx_->Device().IsSycl() ? DeviceOrd::CPU() : ctx_->Device(); | ||
auto labels = info.labels.View(device); | ||
preds.SetDevice(device); | ||
auto predts = ctx_->IsCUDA() ? preds.ConstDeviceSpan() : preds.ConstHostSpan(); | ||
info.weights_.SetDevice(ctx_->Device()); | ||
info.weights_.SetDevice(device); | ||
common::OptionalWeights weights(ctx_->IsCUDA() ? info.weights_.ConstDeviceSpan() | ||
: info.weights_.ConstHostSpan()); | ||
float slope = this->param_.huber_slope; | ||
|
@@ -349,11 +350,12 @@ struct EvalEWiseBase : public MetricNoCache { | |
if (info.labels.Size() != 0) { | ||
CHECK_NE(info.labels.Shape(1), 0); | ||
} | ||
auto labels = info.labels.View(ctx_->Device()); | ||
info.weights_.SetDevice(ctx_->Device()); | ||
auto device = ctx_->Device().IsSycl() ? DeviceOrd::CPU() : ctx_->Device(); | ||
auto labels = info.labels.View(device); | ||
info.weights_.SetDevice(device); | ||
common::OptionalWeights weights(ctx_->IsCUDA() ? info.weights_.ConstDeviceSpan() | ||
: info.weights_.ConstHostSpan()); | ||
preds.SetDevice(ctx_->Device()); | ||
preds.SetDevice(device); | ||
auto predts = ctx_->IsCUDA() ? preds.ConstDeviceSpan() : preds.ConstHostSpan(); | ||
|
||
auto d_policy = policy_; | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -48,7 +48,7 @@ std::string ObjFunction::GetSyclImplementationName(const std::string& name) { | |
return name + sycl_postfix; | ||
} else { | ||
// Function hasn't specific sycl implementation | ||
LOG(FATAL) << "`" << name << "` doesn't have sycl implementation yet\n"; | ||
// LOG(FATAL) << "`" << name << "` doesn't have sycl implementation yet\n"; | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Please remove the commented code. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. done |
||
return name; | ||
} | ||
} | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,15 @@ | ||
/** | ||
* Copyright 2020-2024 by XGBoost Contributors | ||
*/ | ||
#ifndef XGBOOST_TEST_HINGE_H_ | ||
#define XGBOOST_TEST_HINGE_H_ | ||
|
||
#include <xgboost/context.h> // for Context | ||
|
||
namespace xgboost { | ||
|
||
void TestHingeObj(const Context* ctx); | ||
|
||
} // namespace xgboost | ||
|
||
#endif // XGBOOST_TEST_REGRESSION_OBJ_H_ |
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.

Isn't this just std::apply?

There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.

It is, but std::apply cannot be executed in a SYCL kernel.