Merge branch 'main' into xyt/topk
xytintel authored Jul 11, 2024
2 parents 796b2aa + 0253fb9 commit 1933c2c
Showing 75 changed files with 3,570 additions and 1,265 deletions.
19 changes: 8 additions & 11 deletions .github/actions/inductor-xpu-e2e-test/action.yml
@@ -41,7 +41,7 @@ runs:
shell: bash
run: |
source activate e2e_ci
source /opt/intel/oneapi/compiler/latest/env/vars.sh
source /opt/intel/oneapi/pytorch-gpu-dev-0.5/oneapi-vars.sh
if [[ ${{ inputs.suite }} == *"torchbench"* ]]; then
cd ../ && rm -rf audio && git clone --single-branch -b main https://github.com/pytorch/audio.git
cd audio && git checkout $TORCHAUDIO_COMMIT_ID
@@ -80,7 +80,7 @@ runs:
source activate e2e_ci
cp .github/scripts/inductor_xpu_test.sh ../pytorch
cd ../pytorch
source /opt/intel/oneapi/compiler/latest/env/vars.sh
source /opt/intel/oneapi/pytorch-gpu-dev-0.5/oneapi-vars.sh
rm -f ${{ github.workspace }}/summary_accuracy.log
# check param
function contains() {
@@ -110,14 +110,11 @@ runs:
contains "accuracy,performance" $scenario
$contains_status
if [ "${MODEL_ONLY_NAME}" == "" ];then
bash inductor_xpu_test.sh ${suite} ${dt} ${mode} ${scenario} xpu 0 static 8 0 &
bash inductor_xpu_test.sh ${suite} ${dt} ${mode} ${scenario} xpu 1 static 8 1 &
bash inductor_xpu_test.sh ${suite} ${dt} ${mode} ${scenario} xpu 2 static 8 2 &
bash inductor_xpu_test.sh ${suite} ${dt} ${mode} ${scenario} xpu 3 static 8 3 &
bash inductor_xpu_test.sh ${suite} ${dt} ${mode} ${scenario} xpu 4 static 8 4 &
bash inductor_xpu_test.sh ${suite} ${dt} ${mode} ${scenario} xpu 5 static 8 5 &
bash inductor_xpu_test.sh ${suite} ${dt} ${mode} ${scenario} xpu 6 static 8 6 &
bash inductor_xpu_test.sh ${suite} ${dt} ${mode} ${scenario} xpu 7 static 8 7 &
xpu_list=($(xpu-smi discovery |grep 'DRM Device: /dev/' |sed 's/.*card//;s/[^0-9].*//' |awk '{print $1 - 1":"NR - 1}'))
for xpu_id in ${xpu_list[*]}
do
bash inductor_xpu_test.sh ${suite} ${dt} ${mode} ${scenario} xpu ${xpu_id/:*} static ${#xpu_list[*]} ${xpu_id/*:} &
done
else
bash inductor_xpu_test.sh ${suite} ${dt} ${mode} ${scenario} xpu 0 static 1 0 ${MODEL_ONLY_NAME} &
fi
@@ -201,7 +198,7 @@ runs:
source activate e2e_ci
cp .github/scripts/inductor_perf_summary.py ../pytorch
cd ../pytorch
source /opt/intel/oneapi/compiler/latest/env/vars.sh
source /opt/intel/oneapi/pytorch-gpu-dev-0.5/oneapi-vars.sh
pip install styleFrame scipy pandas
set -xe
for suite in $(echo ${{ inputs.suite }} |sed 's/,/ /g')
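
Note on the @@ -110,14 +110,11 @@ hunk above: the hard-coded eight background launches of inductor_xpu_test.sh are replaced by a loop that enumerates the attached cards from `xpu-smi discovery` at run time. The following is a minimal standalone sketch of how the card:shard pairs are built and then split with bash parameter expansion; the sample `xpu-smi` lines are assumptions for illustration, not captured output.

  # Hypothetical stand-in for the lines that `xpu-smi discovery` is grepped for.
  sample=$(printf 'DRM Device: /dev/dri/card%d\n' 1 2 3)

  # Same pipeline as the action: keep only the card number, shift it to be
  # zero-based, and pair it with a zero-based shard index as "card:shard".
  xpu_list=($(echo "$sample" |grep 'DRM Device: /dev/' \
    |sed 's/.*card//;s/[^0-9].*//' |awk '{print $1 - 1":"NR - 1}'))

  echo "devices found: ${#xpu_list[*]}"    # -> 3
  for xpu_id in ${xpu_list[*]}; do
    # ${xpu_id/:*} strips the ":shard" suffix, ${xpu_id/*:} strips the "card:" prefix.
    echo "card ${xpu_id/:*} runs shard ${xpu_id/*:} of ${#xpu_list[*]}"
  done

With this in place the action scales to however many cards the runner exposes instead of assuming exactly eight.
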
1 change: 1 addition & 0 deletions .github/ci_commit_pins/torchbench.txt
@@ -0,0 +1 @@
bb5294090a397b15fadf10cd2172f9bd9c461f9a
142 changes: 142 additions & 0 deletions .github/workflows/_linux_ut.yml
@@ -0,0 +1,142 @@
name: Linux UT Test

on:
workflow_call:
inputs:
pytorch:
required: false
type: string
default: 'main'
description: Pytorch branch/commit
keep_torch_xpu_ops:
required: false
type: string
default: 'false'
description: Keep torch-xpu-ops pin. `true` means use the pinned commit
ut:
required: true
type: string
default: ''
description: UT scope. `op_example,op_extended,op_ut,torch_xpu` Delimiter is comma
python:
required: false
type: string
default: '3.10'
description: Python version
runner:
required: true
type: string
default: 'linux.idc.xpu'
description: Runner label


jobs:
Torch-XPU-UT-Tests:
runs-on: ${{ inputs.runner }}
timeout-minutes: 900
steps:
- name: Checkout torch-xpu-ops
uses: actions/checkout@v4
- name: Prepare Stock Pytorch
run: |
pwd
cd ../ && rm -rf pytorch
git clone https://github.com/pytorch/pytorch pytorch
cd pytorch && git checkout ${{ inputs.pytorch }}
# apply PRs for stock pytorch
pip install requests
python ../torch-xpu-ops/.github/scripts/apply_torch_pr.py
git status && git show -s
git submodule sync && git submodule update --init --recursive
if [[ ${{ inputs.keep_torch_xpu_ops }} == 'true' ]]; then
echo "Don't replace torch-xpu-ops!"
else
rm -rf third_party/torch-xpu-ops && cp -r ../torch-xpu-ops third_party/
# Workaround for torch-xpu-ops ci test
sed -i "s/checkout --quiet \${TORCH_XPU_OPS_COMMIT}/log -n 1/g" caffe2/CMakeLists.txt
fi
- name: Build Pytorch XPU
run: |
which conda && conda clean -ay
conda remove --all -y -n xpu_op_${ZE_AFFINITY_MASK} || \
rm -rf $(dirname ${CONDA_EXE})/../envs/xpu_op_${ZE_AFFINITY_MASK}
conda create -n xpu_op_${ZE_AFFINITY_MASK} python=${{ inputs.python }} cmake ninja -y
source activate xpu_op_${ZE_AFFINITY_MASK}
conda install -c intel mkl-static mkl-include -y
cd ../pytorch
pip install -r requirements.txt
export USE_XPU=1
source /opt/intel/oneapi/pytorch-gpu-dev-0.5/oneapi-vars.sh
export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
WERROR=1 python setup.py bdist_wheel
pip install --force-reinstall dist/*.whl
git clone https://github.com/pytorch/vision && cd vision && python setup.py install && cd ..
pip install -r .ci/docker/requirements-ci.txt
- name: Run XPU OP Examples
if: contains(inputs.ut, 'op_example') || github.event_name == 'schedule'
run: |
cd ${{ github.workspace }}
xpu-smi discovery
source /opt/intel/oneapi/pytorch-gpu-dev-0.5/oneapi-vars.sh
source activate xpu_op_${ZE_AFFINITY_MASK}
cd ${{ github.workspace }}
cd examples
pip install pytest
timeout 8000 pytest -v
- name: Run XPU OP Extended UT
if: contains(inputs.ut, 'op_extended') || github.event_name == 'schedule'
run: |
source /opt/intel/oneapi/pytorch-gpu-dev-0.5/oneapi-vars.sh
source activate xpu_op_${ZE_AFFINITY_MASK}
export PYTORCH_TEST_WITH_SLOW=1
cd ../pytorch/third_party/torch-xpu-ops/test/xpu/extended/
timeout 10000 python run_test_with_skip.py
- name: Run XPU OP UT
if: contains(inputs.ut, 'op_ut') || github.event_name == 'schedule'
run: |
source /opt/intel/oneapi/pytorch-gpu-dev-0.5/oneapi-vars.sh
source activate xpu_op_${ZE_AFFINITY_MASK}
export PYTORCH_ENABLE_XPU_FALLBACK=1
export PYTORCH_TEST_WITH_SLOW=1
cd ../pytorch/third_party/torch-xpu-ops/test/xpu
timeout 10000 python run_test_with_skip.py
# Cases run with an on-demand whitelist, since some suites are too
# slow to go through all operators on CPU. So cases are added on demand
# once the XPU implementation is done.
# test_foreach, test_decomp
timeout 10000 python run_test_with_only.py
- name: Run Torch XPU UT
if: contains(inputs.ut, 'torch_xpu') || github.event_name == 'schedule'
run: |
source /opt/intel/oneapi/pytorch-gpu-dev-0.5/oneapi-vars.sh
source activate xpu_op_${ZE_AFFINITY_MASK}
cd ../pytorch
TEST_REPORTS_DIR=$(pwd)/test/test-reports
rm -rf "$TEST_REPORTS_DIR" && mkdir -p "$TEST_REPORTS_DIR"
# Run Pytorch XPU binary UT
for xpu_case in build/bin/*{xpu,sycl}*; do
if [[ "$xpu_case" != *"*"* && "$xpu_case" != *.so && "$xpu_case" != *.a ]]; then
case_name=$(basename "$xpu_case")
echo "Testing ${case_name} ..."
"$xpu_case" --gtest_output=xml:"$TEST_REPORTS_DIR"/"$case_name".xml
fi
done
# Run Pytorch XPU python UT
export PYTORCH_TEST_WITH_SLOW=1
export PYTORCH_TESTING_DEVICE_ONLY_FOR="xpu"
test_cmd="python test/run_test.py --include "
# All Inductor UT under test/inductor
for test in $(ls test/inductor | grep test);
do
test_cmd="${test_cmd} inductor/$test";
done
# All xpu ut under test/xpu
for test in $(ls test/xpu | grep test);
do
test_cmd="${test_cmd} xpu/$test";
done
if [ -f "test/test_xpu.py" ]; then
test_cmd="${test_cmd} test_xpu.py"
fi
eval $test_cmd
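
One detail in the binary-UT loop of the "Run Torch XPU UT" step above is easy to miss: with bash's default globbing, an unmatched alternative of `build/bin/*{xpu,sycl}*` is left as a literal pattern, so the `!= *"*"*` test filters that case out, and the `.so`/`.a` tests skip libraries. Below is a small self-contained sketch under assumed file names, not the real build outputs.

  mkdir -p demo/build/bin && cd demo
  touch build/bin/xpu_event_test build/bin/libxpu_shim.so   # hypothetical names; no *sycl* file exists
  TEST_REPORTS_DIR=$(pwd)/test-reports && mkdir -p "$TEST_REPORTS_DIR"

  for xpu_case in build/bin/*{xpu,sycl}*; do
    # The unmatched *sycl* alternative survives as a literal pattern containing '*',
    # so the first test drops it; the other two tests drop shared/static libraries.
    if [[ "$xpu_case" != *"*"* && "$xpu_case" != *.so && "$xpu_case" != *.a ]]; then
      case_name=$(basename "$xpu_case")
      echo "would run: $xpu_case --gtest_output=xml:$TEST_REPORTS_DIR/$case_name.xml"
    fi
  done

Only `xpu_event_test` would be executed here; the literal `*sycl*` pattern and the shared library are both skipped.
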
137 changes: 0 additions & 137 deletions .github/workflows/inductor_xpu_e2e_ci.yml

This file was deleted.
