Improved some of the UT codes and cleaned the skip list. #799

Draft
wants to merge 27 commits into base: main
Changes from 1 commit (27 commits total)
a321dee
adjust tolerance for xpu
yuchengliu1 Aug 12, 2024
3e00b7c
update
yuchengliu1 Aug 12, 2024
62613f8
Merge branch 'main' into tolerance_adjust
yuchengliu1 Aug 12, 2024
a23732d
Merge branch 'main' into tolerance_adjust
yuchengliu1 Aug 13, 2024
7f06d6a
update skip_list
yuchengliu1 Aug 14, 2024
e9a1a15
Merge branch 'main' into tolerance_adjust
yuchengliu1 Aug 14, 2024
97ec9c5
update
yuchengliu1 Aug 14, 2024
7051142
update
yuchengliu1 Aug 14, 2024
16176cd
update
yuchengliu1 Aug 14, 2024
1a0d85a
Merge branch 'main' into tolerance_adjust
yuchengliu1 Aug 14, 2024
53fac08
update skiplist according ci
yuchengliu1 Aug 14, 2024
835326a
Merge branch 'main' into tolerance_adjust
yuchengliu1 Aug 15, 2024
cdc3f6a
update extented skiplist
yuchengliu1 Aug 18, 2024
11dbedb
Merge branch 'main' into tolerance_adjust
fengyuan14 Aug 19, 2024
593c98d
enhance extended hook
yuchengliu1 Aug 19, 2024
e81c7f5
update skiplist
yuchengliu1 Aug 19, 2024
cf3ec92
update skiplist
yuchengliu1 Aug 19, 2024
2337f35
fixed some test cases code and clean skip list
PenghuiCheng Aug 20, 2024
f1ffc1b
Merge from tolerance_adjust branch
PenghuiCheng Aug 20, 2024
7bd6268
Merge from main branch
PenghuiCheng Aug 22, 2024
e367dd2
update code
PenghuiCheng Aug 22, 2024
17b6c53
Update code
PenghuiCheng Aug 22, 2024
58137a5
Merge branch 'main' into penghui/clean_skip_list
PenghuiCheng Aug 26, 2024
55f3aff
Merge branch 'main' into penghui/clean_skip_list
PenghuiCheng Aug 27, 2024
738b416
Merge branch 'main' into penghui/clean_skip_list
PenghuiCheng Aug 29, 2024
201ded6
Merge branch 'main' into penghui/clean_skip_list
PenghuiCheng Sep 3, 2024
1ec82c5
Merge remote-tracking branch 'origin/main' into penghui/clean_skip_list
PenghuiCheng Sep 20, 2024
enhance extended hook
yuchengliu1 committed Aug 19, 2024
commit 593c98d2f0be48f75af40585604dba66659781dd
18 changes: 18 additions & 0 deletions test/xpu/extended/run_test_with_skip.py
@@ -52,6 +52,10 @@
"test_compare_cpu_nextafter_xpu_bfloat16",
# CUDA does not support the data type either
"test_non_standard_bool_values_native_dropout_backward_xpu_bool",
# TestCompositeCompliance
# CPU fallback fails
# Require implementing aten::embedding_renorm_
"test_view_replay_nn_functional_embedding_xpu_float32",
# Need FP64 golden ref for more accurate comparison
"test_compare_cpu_log_softmax_xpu_bfloat16",
# TestCompositeCompliance::test_cow_input
@@ -70,6 +74,11 @@
# https://github.com/pytorch/pytorch/blob/c97e3ebb96d7457075b019b94411e8c2d058e68b/aten/src/ATen/native/EmbeddingBag.cpp#L300
"test_compare_cpu_nn_functional_embedding_bag_xpu_bfloat16",
"test_compare_cpu_nn_functional_embedding_bag_xpu_float16",
# Not implemented operators, aten::embedding_renorm_.
# To retrieve cases when the operators are supported.
# https://github.com/intel/torch-xpu-ops/issues/380
"test_compare_cpu_nn_functional_embedding_bag_xpu_float32",
"test_compare_cpu_nn_functional_embedding_bag_xpu_float64",
# Double and complex datatype matmul is not supported in oneDNN
"test_compare_cpu_cdist_xpu_float64",
# bilinear interpolate includes large calculation steps, accuracy reduces in half-precision
@@ -108,6 +117,10 @@
# Not in CUDA test scope too
"test_compare_cpu_nn_functional_interpolate_bicubic_xpu_bfloat16",
"test_compare_cpu_nn_functional_interpolate_bicubic_xpu_float16",
# Not all operators are implemented for XPU tested in the case.
# Retrieve it once the operator is implemented.
# Error: The operator 'aten::glu_jvp' is not currently implemented for the XPU device.
"test_forward_ad_nn_functional_glu_xpu_float32",
# Precision error.
# Mismatched elements: 1 / 812 (0.1%)
# Greatest absolute difference: 0.03125 at index (610,) (up to 0.001 allowed)
@@ -116,6 +129,11 @@
# RuntimeError: Expected both inputs to be Half, Float or Double tensors but got BFloat16 and BFloat16.
# Polar's backward is calculated using complex(), which does not support bfloat16. CUDA fails with same error.
"test_compare_cpu_polar_xpu_bfloat16",
# Precision error.
# Mismatched elements: 1 / 25 (4.0%)
# Greatest absolute difference: 0.00146484375 at index (0, 0) (up to 0.001 allowed)
# Greatest relative difference: 0.0163116455078125 at index (0, 0) (up to 0.001 allowed)
"test_compare_cpu_sub_xpu_float16",
# different results for value index due to unstable sort.
# XPU and CUDA have the same result.
"test_compare_cpu_median_xpu_int16",
18 changes: 14 additions & 4 deletions test/xpu/extended/test_ops_xpu.py
@@ -63,10 +63,20 @@ class Namespace:
    # Therefore, we build TestCommonProxy by inheriting the TestCommon and TestCase to ensure
    # the same feature set as the TestCommon.
    class TestCommonProxy(TestCase, TestCommonBase):
        pass
        def __init__(self, test_case = None):
            if test_case:
                # copy custom accuracy setting
                self.maxDiff = test_case.maxDiff
                self.precision = test_case.precision
                self.rel_tol = test_case.rel_tol

    class TestCompositeComplianceProxy(TestCase, TestCompositeComplianceBase):
        pass
        def __init__(self, test_case = None):
            if test_case:
                # copy custom accuracy setting
                self.maxDiff = test_case.maxDiff
                self.precision = test_case.precision
                self.rel_tol = test_case.rel_tol


class TestCommon(TestCase):
@@ -78,13 +88,13 @@ class TestCommon(TestCase):
    def test_compare_cpu(self, device, dtype, op):
        # check if supported both by CPU and XPU
        if dtype in op.dtypes and dtype in op.supported_dtypes(device):
            self.proxy = Namespace.TestCommonProxy()
            self.proxy = Namespace.TestCommonProxy(self)
            test_common_test_fn = get_wrapped_fn(Namespace.TestCommonProxy.test_compare_cpu)
            test_common_test_fn(self.proxy, device, dtype, op)
        # for CUDA doesn't support operators
        elif (op.name in _ops_without_cuda_support):
            if dtype in op.dtypes:
                self.proxy = Namespace.TestCommonProxy()
                self.proxy = Namespace.TestCommonProxy(self)
                test_common_test_fn = get_wrapped_fn(Namespace.TestCommonProxy.test_compare_cpu)
                test_common_test_fn(self.proxy, device, dtype, op)
        else:
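The constructor change above is what lets per-op tolerance overrides applied to the outer TestCommon instance reach the reused upstream test body: the proxy now copies maxDiff, precision, and rel_tol from the wrapping test case instead of starting from defaults. A standalone sketch of that pattern, with hypothetical class names, might look like this:

```python
# Hedged sketch of the proxy pattern; all class names here are hypothetical.
import unittest


class UpstreamCompareTest:
    """Stand-in for an upstream test mixin whose body honours self.precision."""

    def run_compare(self):
        assert self.precision == 2e-3, "tolerance override was not propagated"


class Proxy(unittest.TestCase, UpstreamCompareTest):
    def __init__(self, test_case=None):
        super().__init__()
        if test_case is not None:
            # copy custom accuracy settings from the wrapping test case
            self.maxDiff = test_case.maxDiff
            self.precision = test_case.precision
            self.rel_tol = test_case.rel_tol


class Outer(unittest.TestCase):
    precision = 2e-3  # e.g. set by a toleranceOverride decorator
    rel_tol = 5e-3

    def test_compare_cpu(self):
        proxy = Proxy(self)   # overrides flow into the proxy
        proxy.run_compare()   # upstream body now sees precision == 2e-3


if __name__ == "__main__":
    unittest.main()
```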
12 changes: 1 addition & 11 deletions test/xpu/xpu_test_utils.py
@@ -270,7 +270,7 @@
            torch.bfloat16: tol(atol=1e-02, rtol=1.6e-02),
        }
    },
    "aten2": {
    "atan2": {
        ("TestCommon", "test_compare_cpu"): {
            torch.bfloat16: tol(atol=0.008, rtol=0.005),
        }
    },
@@ -305,11 +305,6 @@
            torch.float16: tol(atol=0.002, rtol=0.006),
        }
    },
    "nn.functional.interpolate": {
        ("TestCommon", "test_compare_cpu"): {
            torch.float16: tol(atol=0.003, rtol=0.002),
        }
    },
    "prod": {
        ("TestCommon", "test_compare_cpu"): {
            torch.bfloat16: tol(atol=0.002, rtol=0.005),
@@ -330,11 +325,6 @@
            torch.bfloat16: tol(atol=0.008, rtol=0.005),
        }
    },
    "nn.functional.nll_loss": {
        ("TestCommon", "test_compare_cpu"): {
            torch.bfloat16: tol(atol=0.008, rtol=0.009),
        }
    },
}
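For reference, the entries in this table follow PyTorch's tol/toleranceOverride convention. Below is a hedged sketch of how such a table could be attached to OpInfo entries via DecorateInfo; the helper name apply_xpu_tolerances and the exact wiring are assumptions, not necessarily how xpu_test_utils.py applies them.

```python
# Hedged sketch: turn a {op_name: {(TestClass, test_name): {dtype: tol}}} table
# into DecorateInfo entries on the matching OpInfo objects.  The helper name
# "apply_xpu_tolerances" is illustrative, not an actual helper in this repo.
import torch
from torch.testing._internal.common_device_type import tol, toleranceOverride
from torch.testing._internal.common_methods_invocations import DecorateInfo, op_db

overrides = {
    "atan2": {
        ("TestCommon", "test_compare_cpu"): {
            torch.bfloat16: tol(atol=0.008, rtol=0.005),
        }
    },
}


def apply_xpu_tolerances(op_db, overrides):
    for op in op_db:
        per_test = overrides.get(op.name)
        if not per_test:
            continue
        extra = tuple(
            DecorateInfo(toleranceOverride(dtype_tols), cls_name, test_name)
            for (cls_name, test_name), dtype_tols in per_test.items()
        )
        # DecorateInfo decorators are picked up when the device-type tests are
        # instantiated, loosening atol/rtol only for the named test and dtypes.
        op.decorators = (*op.decorators, *extra)


apply_xpu_tolerances(op_db, overrides)
```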