File tree Expand file tree Collapse file tree 5 files changed +7
-13
lines changed
.ci/docker/ci_commit_pins
runtime/core/portable_type/c10/c10 Expand file tree Collapse file tree 5 files changed +7
-13
lines changed Original file line number Diff line number Diff line change 1
- 01f1cc44cbbfdf6307aa01b803a4ee22f9ade946
1
+ 64247892a0ca8ed045ad0b530eb87c3dd66590ea
Original file line number Diff line number Diff line change @@ -71,7 +71,7 @@ def python_is_compatible():
71
71
#
72
72
# NOTE: If you're changing, make the corresponding change in .ci/docker/ci_commit_pins/pytorch.txt
73
73
# by picking the hash from the same date in https://hud.pytorch.org/hud/pytorch/pytorch/nightly/
74
- NIGHTLY_VERSION = "dev20250422"
74
+ NIGHTLY_VERSION = "dev20250602"
75
75
76
76
77
77
def install_requirements (use_pytorch_nightly ):
@@ -82,7 +82,7 @@ def install_requirements(use_pytorch_nightly):
82
82
# been installed on CI before this step, so pip won't reinstall them
83
83
f"torch==2.8.0.{NIGHTLY_VERSION}" if use_pytorch_nightly else "torch",
84
84
(
85
- f"torchvision==0.22.0.{NIGHTLY_VERSION}"
85
+ f"torchvision==0.23.0.{NIGHTLY_VERSION}"
86
86
if use_pytorch_nightly
87
87
else "torchvision"
88
88
), # For testing.
Original file line number Diff line number Diff line change @@ -241,7 +241,7 @@ using namespace c10::xpu;
241
241
#ifdef __HIPCC__
242
242
// Unlike CUDA, HIP requires a HIP header to be included for __host__ to work.
243
243
// We do this #include here so that C10_HOST_DEVICE and friends will Just Work.
244
- // See https://github.com/ROCm-Developer-Tools/HIP/issues/441
244
+ // See https://github.com/ROCm/hip/issues/441
245
245
#include <hip/hip_runtime.h>
246
246
#endif
247
247
@@ -286,7 +286,7 @@ constexpr uint32_t CUDA_THREADS_PER_BLOCK_FALLBACK = 256;
286
286
#define C10_MIN_BLOCKS_PER_SM(threads_per_block, blocks_per_sm) \
287
287
((((threads_per_block) * (blocks_per_sm) <= CUDA_MAX_THREADS_PER_SM) \
288
288
? (blocks_per_sm) \
289
- : ((CUDA_MAX_THREADS_PER_SM + (threads_per_block)- 1 ) / \
289
+ : ((CUDA_MAX_THREADS_PER_SM + (threads_per_block) - 1 ) / \
290
290
(threads_per_block))))
291
291
// C10_LAUNCH_BOUNDS is analogous to __launch_bounds__
292
292
#define C10_LAUNCH_BOUNDS_0 \
Original file line number Diff line number Diff line change @@ -10,14 +10,11 @@ C10_CLANG_DIAGNOSTIC_PUSH()
10
10
C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion")
11
11
#endif
12
12
13
- #if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
14
13
#if defined(CL_SYCL_LANGUAGE_VERSION)
15
14
#include <CL/sycl.hpp> // for SYCL 1.2.1
16
- #else
15
+ #elif defined(SYCL_LANGUAGE_VERSION)
17
16
#include <sycl/sycl.hpp> // for SYCL 2020
18
17
#endif
19
- #include <ext/oneapi/bfloat16.hpp>
20
- #endif
21
18
22
19
namespace c10 {
23
20
Original file line number Diff line number Diff line change 14
14
#include < cuda_bf16.h>
15
15
#endif
16
16
17
- #if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
18
17
#if defined(CL_SYCL_LANGUAGE_VERSION)
19
18
#include < CL/sycl.hpp> // for SYCL 1.2.1
20
- #else
19
+ #elif defined(SYCL_LANGUAGE_VERSION)
21
20
#include < sycl/sycl.hpp> // for SYCL 2020
22
21
#endif
23
- #include < ext/oneapi/bfloat16.hpp>
24
- #endif
25
22
26
23
namespace c10 {
27
24
You can’t perform that action at this time.
0 commit comments