Skip to content

Commit 58137a5

Browse files
authored
Merge branch 'main' into penghui/clean_skip_list
2 parents 17b6c53 + 12d7ee7 commit 58137a5

File tree

6 files changed

+42
-13
lines changed

6 files changed

+42
-13
lines changed

.github/actions/inductor-xpu-e2e-test/action.yml

+2-2
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ runs:
4848
source activate e2e_ci
4949
source .github/scripts/env.sh
5050
if [[ ${{ inputs.suite }} == *"torchbench"* ]]; then
51-
if [[ "${{ inputs.pytorch }}" != *" wheel"* ]]; then
51+
if [ "${{ inputs.pytorch }}" != "nightly_wheel" ]; then
5252
cd ../ && rm -rf audio && git clone --single-branch -b main https://github.com/pytorch/audio.git
5353
cd audio && git checkout $TORCHAUDIO_COMMIT_ID
5454
python setup.py bdist_wheel && pip uninstall torchaudio -y && pip install dist/*.whl
@@ -70,7 +70,7 @@ runs:
7070
pip install --force-reinstall git+https://github.com/huggingface/transformers@${TRANSFORMERS_VERSION}
7171
fi
7272
if [[ ${{ inputs.suite }} == *"timm_models"* ]]; then
73-
if [[ "${{ inputs.pytorch }}" != *" wheel"* ]]; then
73+
if [ "${{ inputs.pytorch }}" != "nightly_wheel" ]; then
7474
cd ../ && rm -rf vision && git clone --single-branch -b main https://github.com/pytorch/vision.git
7575
cd vision && git checkout $TORCHVISION_COMMIT_ID
7676
python setup.py bdist_wheel && pip uninstall torchvision -y && pip install dist/*.whl

.github/workflows/_linux_ut.yml

+14-3
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,11 @@ on:
1313
type: string
1414
default: 'false'
1515
description: Keep torch-xpu-ops pin. `true` means use pinned commit
16+
triton:
17+
required: false
18+
type: string
19+
default: ''
20+
description: Triton commit. Use pytorch pinned commit by default
1621
ut:
1722
required: true
1823
type: string
@@ -71,9 +76,15 @@ jobs:
7176
source activate xpu_op_${ZE_AFFINITY_MASK}
7277
cd ../pytorch
7378
TRITON_REPO="https://github.com/intel/intel-xpu-backend-for-triton"
74-
TRITON_COMMIT_ID="$(<.ci/docker/ci_commit_pins/triton-xpu.txt)"
79+
if [ -z ${{ inputs.triton }} ]; then
80+
TRITON_COMMIT_ID="$(<.ci/docker/ci_commit_pins/triton-xpu.txt)"
81+
else
82+
TRITON_COMMIT_ID="${{ inputs.triton }}"
83+
fi
7584
echo ${TRITON_REPO}@${TRITON_COMMIT_ID}
76-
pip install --force-reinstall "git+${TRITON_REPO}@${TRITON_COMMIT_ID}#subdirectory=python"
85+
if [ "${{ inputs.pytorch }}" != "nightly_wheel" ]; then
86+
pip install --force-reinstall "git+${TRITON_REPO}@${TRITON_COMMIT_ID}#subdirectory=python"
87+
fi
7788
- name: Build Pytorch XPU
7889
run: |
7990
source activate xpu_op_${ZE_AFFINITY_MASK}
@@ -85,7 +96,7 @@ jobs:
8596
else
8697
export _GLIBCXX_USE_CXX11_ABI=1
8798
fi
88-
if [[ "${{ inputs.pytorch }}" != *" wheel"* ]]; then
99+
if [ "${{ inputs.pytorch }}" != "nightly_wheel" ]; then
89100
export CMAKE_PREFIX_PATH=${CMAKE_PREFIX_PATH}:${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
90101
pip install -r requirements.txt
91102
WERROR=1 python setup.py bdist_wheel

.github/workflows/nightly_ondemand.yml

+1
Original file line numberDiff line numberDiff line change
@@ -74,6 +74,7 @@ jobs:
7474
ut: ${{ github.event_name == 'schedule' && 'op_example,op_extended,op_ut,torch_xpu' || inputs.ut }}
7575
pytorch: ${{ github.event_name == 'schedule' && 'main' || inputs.pytorch }}
7676
python: ${{ github.event_name == 'schedule' && '3.10' || inputs.python }}
77+
triton: ${{ github.event_name == 'schedule' && '' || inputs.triton }}
7778
runner: linux.idc.xpu
7879

7980
Linux-Weekly-UT-Tests-ABI-0:

.github/workflows/nightly_ondemand_whl.yml

+8-8
Original file line numberDiff line numberDiff line change
@@ -62,7 +62,7 @@ jobs:
6262
with:
6363
ut: ${{ github.event_name == 'schedule' && 'op_example,op_extended,op_ut,torch_xpu' || inputs.ut }}
6464
python: ${{ github.event_name == 'schedule' && '3.10' || inputs.python }}
65-
pytorch: nightly wheel
65+
pytorch: nightly_wheel
6666
runner: linux.idc.xpu
6767

6868
Linux-Nightly-Ondemand-E2E-WHL-Tests:
@@ -151,7 +151,7 @@ jobs:
151151
dt: float32,bfloat16,float16,amp_bf16,amp_fp16
152152
mode: inference,training
153153
scenario: accuracy
154-
pytorch: nightly wheel
154+
pytorch: nightly_wheel
155155
hf_token: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
156156
- name: Nightly Torchbench BF16 Training Accuracy Test
157157
if: github.event_name == 'schedule' && github.event.schedule == '0 14 * * 0-4'
@@ -161,7 +161,7 @@ jobs:
161161
dt: bfloat16
162162
mode: training
163163
scenario: accuracy
164-
pytorch: nightly wheel
164+
pytorch: nightly_wheel
165165
env_prepare: true
166166
hf_token: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
167167
- name: Nightly Timm_models FP16 Training Accuracy Test
@@ -172,7 +172,7 @@ jobs:
172172
dt: float16
173173
mode: training
174174
scenario: accuracy
175-
pytorch: nightly wheel
175+
pytorch: nightly_wheel
176176
env_prepare: true
177177
hf_token: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
178178
# Weekly launch
@@ -185,7 +185,7 @@ jobs:
185185
dt: float32,bfloat16,float16,amp_bf16,amp_fp16
186186
mode: inference,training
187187
scenario: accuracy,performance
188-
pytorch: nightly wheel
188+
pytorch: nightly_wheel
189189
hf_token: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
190190
- name: Weekly Torchbench Full Test
191191
if: github.event_name == 'schedule' && github.event.schedule == '0 17 * * 5'
@@ -196,7 +196,7 @@ jobs:
196196
dt: float32,bfloat16,float16,amp_bf16,amp_fp16
197197
mode: inference,training
198198
scenario: accuracy,performance
199-
pytorch: nightly wheel
199+
pytorch: nightly_wheel
200200
hf_token: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
201201
- name: Weekly Timm_models Full Test
202202
if: github.event_name == 'schedule' && github.event.schedule == '0 17 * * 5'
@@ -207,7 +207,7 @@ jobs:
207207
dt: float32,bfloat16,float16,amp_bf16,amp_fp16
208208
mode: inference,training
209209
scenario: accuracy,performance
210-
pytorch: nightly wheel
210+
pytorch: nightly_wheel
211211
hf_token: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
212212
# On-demand launch
213213
- name: OnDemand Test (${{ inputs.suite }} ${{ inputs.dt }} ${{ inputs.mode }} ${{ inputs.scenario }})
@@ -219,7 +219,7 @@ jobs:
219219
dt: ${{ inputs.dt }}
220220
mode: ${{ inputs.mode }}
221221
scenario: ${{ inputs.scenario }}
222-
pytorch: nightly wheel
222+
pytorch: nightly_wheel
223223
hf_token: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
224224

225225
- name: Summarize archive files

test/xpu/skip_list_common.py

+12
Original file line numberDiff line numberDiff line change
@@ -1595,6 +1595,8 @@
15951595
"test_matmul_check_entries_tunableop_xpu_float16",
15961596
"test_minimum_tuning_iteration_tunableop_xpu_float16",
15971597
"test_validator_tunableop_rocm_xpu_float32",
1598+
"test_addmm_relu_tunableop_rocm_xpu_float32",
1599+
"test_addmm_relu_tunableop_rocm_xpu_float64",
15981600
),
15991601

16001602
"test_ops_fwd_gradients_xpu.py": (
@@ -1874,6 +1876,10 @@
18741876
# NotImplementedError: Could not run 'aten::_to_dense' with arguments from the 'SparseXPU' backend.
18751877
"test_fn_fwgrad_bwgrad_to_sparse_xpu_float64",
18761878
"test_forward_mode_AD_to_sparse_xpu_float64",
1879+
1880+
# issue: https://github.com/intel/torch-xpu-ops/issues/809
1881+
"test_fn_fwgrad_bwgrad_nn_functional_conv3d_xpu_complex128",
1882+
"test_fn_fwgrad_bwgrad_nn_functional_conv3d_xpu_float64",
18771883
),
18781884

18791885
"test_matmul_cuda_xpu.py": (
@@ -2390,6 +2396,10 @@
23902396
# https://github.com/intel/torch-xpu-ops/issues/357
23912397
"test_fn_grad_to_sparse_xpu_float64",
23922398
"test_fn_gradgrad_to_sparse_xpu_float64",
2399+
2400+
# issue: https://github.com/intel/torch-xpu-ops/issues/809
2401+
"test_fn_gradgrad_nn_functional_conv3d_xpu_complex128",
2402+
"test_fn_gradgrad_nn_functional_conv3d_xpu_float64",
23932403
),
23942404

23952405
"test_torch_xpu.py": (
@@ -2594,6 +2604,8 @@
25942604
# accuracy issue, TODO
25952605
"test_Conv2d_naive_groups_xpu_float16",
25962606
"test_Conv2d_groups_nobias",
2607+
# issue: https://github.com/intel/torch-xpu-ops/issues/809
2608+
"test_thnn_conv_strided_padded_dilated",
25972609
),
25982610

25992611
"test_dynamic_shapes_xpu.py": None,

test/xpu/xpu_test_utils.py

+5
Original file line numberDiff line numberDiff line change
@@ -663,6 +663,11 @@ def align_supported_dtypes(self, db):
663663
backward_dtypes.add(bfloat16)
664664
opinfo.backward_dtypes = tuple(backward_dtypes)
665665

666+
if "has_fp64=0" in str(torch.xpu.get_device_properties(0)):
667+
fp64_dtypes = [ torch.float64, torch.complex128, torch.double, ]
668+
opinfo.dtypesIfXPU = set(filter(lambda x: (x not in fp64_dtypes), list(opinfo.dtypesIfXPU)))
669+
opinfo.backward_dtypes = tuple(filter(lambda x: (x not in fp64_dtypes), list(opinfo.backward_dtypes)))
670+
666671
def __enter__(self):
667672
# Monkey patch until we have a fancy way
668673

0 commit comments

Comments
 (0)