Skip to content

Commit

Permalink
unittest: Remove skips from skip list (#691)
Browse files Browse the repository at this point in the history
Remove unnecessary cases from the skip list after evaluating issue #256.

Signed-off-by: Cheng Penghui <[email protected]>
  • Loading branch information
PenghuiCheng authored Aug 6, 2024
1 parent f1c4c85 commit e6dee02
Show file tree
Hide file tree
Showing 2 changed files with 1 addition and 23 deletions.
22 changes: 0 additions & 22 deletions test/xpu/run_test_with_skip.py
Original file line number Diff line number Diff line change
Expand Up @@ -1134,37 +1134,18 @@ def launch_test(test_case, skip_list=None, exe_list=None):
"test_save_load_nn_TransformerEncoder_eval_mode_xpu_float64",
"test_save_load_nn_TransformerEncoder_train_mode_xpu_float64",
"test_save_load_nn_Transformer_xpu_float64",
# AssertionError: Tensor-likes are not close!
"test_cpu_gpu_parity_nn_ConvTranspose3d_xpu_complex32",
# Unexpected success:
"test_cpu_gpu_parity_nn_ConvTranspose2d_xpu_complex32",
"test_cpu_gpu_parity_nn_ConvTranspose1d_xpu_complex32",
"test_memory_format_nn_AvgPool2d_xpu_float32",
"test_memory_format_nn_AvgPool2d_xpu_float64",
# AssertionError: False is not true
"test_memory_format_nn_Conv2d_xpu_float64",
"test_memory_format_nn_ConvTranspose2d_xpu_float64",
"test_memory_format_nn_LazyConv2d_xpu_float64",
"test_memory_format_nn_LazyConvTranspose2d_xpu_float64",
# CPU fallback could not cover these
# CUDA xfails
# Failed: Unexpected success
"test_memory_format_nn_AdaptiveAvgPool2d_xpu_float32",
"test_memory_format_nn_AdaptiveAvgPool2d_xpu_float64",
# CPU fallback fails
# AssertionError: False is not true
"test_memory_format_nn_ReflectionPad3d_xpu_float32",
"test_memory_format_nn_ReflectionPad3d_xpu_float64",
"test_memory_format_nn_ReplicationPad2d_xpu_float32",
"test_memory_format_nn_ReplicationPad2d_xpu_float64",
"test_memory_format_nn_ReplicationPad3d_xpu_float32",
"test_memory_format_nn_ReplicationPad3d_xpu_float64",
# CPU fallback fails
# RuntimeError: view size is not compatible with input tensor's size and stride (at least one dimension spans across two contiguous subspaces). Use .reshape(...) instead.
"test_memory_format_nn_GroupNorm_xpu_bfloat16",
"test_memory_format_nn_GroupNorm_xpu_float16",
"test_memory_format_nn_GroupNorm_xpu_float32",
"test_memory_format_nn_GroupNorm_xpu_float64",
"test_save_load_nn_GRU_eval_mode_xpu_float32",
"test_save_load_nn_GRUCell_xpu_float32",
"test_save_load_nn_GRU_train_mode_xpu_float32",
Expand All @@ -1189,9 +1170,6 @@ def launch_test(test_case, skip_list=None, exe_list=None):
"test_non_contiguous_tensors_nn_GRUCell_xpu_float32",
"test_non_contiguous_tensors_nn_GRU_eval_mode_xpu_float32",
"test_non_contiguous_tensors_nn_GRU_train_mode_xpu_float32",
# Reflection_pad2d doesn't support channel last, CUDA skipped too.
"test_memory_format_nn_ReflectionPad2d_xpu_float32",
"test_memory_format_nn_ReflectionPad2d_xpu_float64",
# AssertionError: False is not true
"test_to_nn_BatchNorm1d_eval_mode_swap_True_set_grad_True_xpu_float32",
"test_to_nn_BatchNorm1d_train_mode_swap_True_set_grad_True_xpu_float32",
Expand Down
2 changes: 1 addition & 1 deletion test/xpu/test_modules_xpu.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ def _test_to(self, device, dtype, module_info, training, swap, set_grad):
devices = ['xpu']
dtypes = module_info.dtypes
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=False, training=training)
requires_grad=False, training=training)
torch.__future__.set_swap_module_params_on_conversion(swap)

for module_input in module_inputs:
Expand Down

0 comments on commit e6dee02

Please sign in to comment.