|
     ),

     "test_scatter_gather_ops_xpu.py": (
-        "test_gather_backward_with_empty_index_tensor_sparse_grad_True_xpu_float32", # Could not run 'aten::_sparse_coo_tensor_with_dims_and_tensors' with arguments from the 'SparseXPU' backend.
-        "test_gather_backward_with_empty_index_tensor_sparse_grad_True_xpu_float64", # Could not run 'aten::_sparse_coo_tensor_with_dims_and_tensors' with arguments from the 'SparseXPU' backend.
+        # NotImplementedError: Could not run 'aten::_indices' with arguments from the 'SparseXPU' backend.
+        # https://github.com/intel/torch-xpu-ops/issues/484
+        "test_gather_backward_with_empty_index_tensor_sparse_grad_True_xpu_float32",
+        "test_gather_backward_with_empty_index_tensor_sparse_grad_True_xpu_float64",
     ),

     "test_autograd_fallback.py": None,

-    "test_sort_and_select_xpu.py":("test_sort_large_slice_xpu",), # Hard code CUDA
+    "test_sort_and_select_xpu.py": ("test_sort_large_slice_xpu",), # Hard code CUDA

     "nn/test_embedding_xpu.py": (
         # NotImplementedError: Could not run 'aten::_indices' with arguments from the 'SparseXPU' backend.
...
     ),

     "test_transformers_xpu.py": (
+        # https://github.com/intel/torch-xpu-ops/issues/761
         # AssertionError: False is not true
         # CPU fallback failure. To support aten::transformer_encoder_layer_forward with proper priority.
         "test_disable_fastpath_xpu",
...
         # Could not run 'aten::_to_copy' with arguments from the 'NestedTensorXPU' backend
         "test_with_nested_tensor_input_xpu",
         # Double and complex datatype matmul is not supported in oneDNN
+        # https://github.com/intel/torch-xpu-ops/issues/253
         "test_sdp_math_gradcheck_contiguous_inputs_False_xpu",
         "test_sdp_math_gradcheck_contiguous_inputs_True_xpu",
         "test_transformerencoder_batch_first_True_training_True_enable_nested_tensor_True_xpu",
...
         "test_scaled_dot_product_attention_3D_input_dim_2D_attn_mask_dropout_p_0_0_xpu",
     ),

-    "test_complex_xpu.py": (
-        # Skip CPU case
-        "test_eq_xpu_complex128",
-        "test_eq_xpu_complex64",
-        "test_ne_xpu_complex128",
-        "test_ne_xpu_complex64",
-    ),
+    "test_complex_xpu.py": None,

     "test_modules_xpu.py": (
         # oneDNN issues
...

     "test_dataloader_xpu.py": (
         # Skip for XPU didn't support
-        "test_nested_tensor_multiprocessing",
+        # https://github.com/intel/torch-xpu-ops/issues/613
+        "test_nested_tensor_multiprocessing_context_forkserver_xpu",
+        "test_nested_tensor_multiprocessing_context_spawn_xpu",
         # pinned memory issue
+        # https://github.com/intel/torch-xpu-ops/issues/296
         "test_custom_batch_pin",
         "test_sequential_pin_memory",
         "test_shuffle_pin_memory",
...
         "test_kaiser_window_xpu",
     ),

-    "test_autocast_xpu.py": (
-        # Frontend API support
-        # Unsupported XPU runtime functionality, '_set_cached_tensors_enabled'
-        # https://github.com/intel/torch-xpu-ops/issues/223
-        "test_cache_disabled",
-    ),
+    "test_autocast_xpu.py": None,

     "test_autograd_xpu.py": (
+        # https://github.com/intel/torch-xpu-ops/issues/618
         # c10::NotImplementedError
         "test_autograd_composite_implicit_and_dispatch_registration_xpu",
         "test_autograd_multiple_dispatch_registrations_xpu",
...
         "test_checkpointing_without_reentrant_memory_savings",
         "test_flops_and_mem",
         "test_profiler_emit_nvtx_xpu",
-        # RuntimeError: grad can be implicitly created only for scalar outputs
-        "test_reentrant_parent_error_on_cpu_xpu",
         # Double and complex datatype matmul is not supported in oneDNN
         "test_mv_grad_stride_0_xpu",
         # module 'torch._C' has no attribute '_scatter'
         "test_checkpointing_without_reentrant_dataparallel",
         "test_dataparallel_saved_tensors_hooks",
-        # AssertionError: "none of output has requires_grad=True" does not match "PyTorch was compiled without CUDA support"
-        "test_checkpointing_without_reentrant_detached_tensor_use_reentrant_True",
-        # Skip device count < 2
-        "test_backward_device_xpu",
-        "test_inputbuffer_add_multidevice_xpu",
-        "test_unused_output_device_xpu",
-        # Skip CPU case
-        "test_copy__xpu",
-        "test_checkpointing_non_reentrant_autocast_cpu",
-        "test_per_dispatch_key_input_saving_xpu",
         # Runtime error after enabling PTI
         # RuntimeError: Fail to enable Kineto Profiler on XPU due to error code: 200
         # https://github.com/intel/torch-xpu-ops/issues/731
...
         # CPU/CUDA bias code in aten::mode_out
         # https://github.com/intel/torch-xpu-ops/issues/327
         # RuntimeError: mode only supports CPU AND CUDA device type, got: xpu
-        "test_dim_reduction",
-        "test_mode",
+        "test_mode_xpu",
+        "test_mode_wrong_dtype_xpu",
         "test_dim_reduction_fns_fn_name_mode",
-        # CUDA skips the case in opdb.
-        # https://github.com/intel/torch-xpu-ops/issues/222
-        "test_ref_extremal_values_mean_xpu_complex64",
-        # CPU fallback fails (CPU vs Numpy).
-        "test_ref_small_input_masked_prod_xpu_float16",
     ),

     "test_unary_ufuncs_xpu.py": (
...

     "nn/test_convolution_xpu.py": (
         # XPU unsupport ops, skip.
+        # https://github.com/intel/torch-xpu-ops/issues/348
         "test_cudnn_convolution_relu_xpu_float16",
         "test_cudnn_convolution_relu_xpu_float32",
         "test_cudnn_convolution_add_relu_xpu_float16",
...
         "test_Conv2d_groups_nobias",
     ),

-    "test_dynamic_shapes_xpu.py": (
-        # issue 746, new ut failures introduced by new pytorch
-        "test_method_fn_add_first_type_int_second_type_float",
-        "test_method_fn_mul_first_type_int_second_type_float",
-        "test_method_fn_sub_first_type_int_second_type_float",
-    ),
+    "test_dynamic_shapes_xpu.py": None,

     "nn/test_load_state_dict_xpu.py": None,
...
     ),

     "nn/test_parametrization_xpu.py": None,
+
+    "test_segment_reductions_xpu.py": None,
 }
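
For context: the dict being edited above maps each ported test file to the cases excluded on XPU, with `None` meaning the file runs with nothing deselected. Below is a minimal sketch of how such a table might be consumed, assuming a pytest-based runner; the `run_with_skips` helper and the `-k` deselection shown here are illustrative assumptions, not the repository's actual driver.

```python
import subprocess
import sys

# Hypothetical excerpt of the skip dict from the diff above:
# None -> run the whole file; a tuple -> cases to filter out.
skip_dict = {
    "test_autocast_xpu.py": None,
    "test_sort_and_select_xpu.py": ("test_sort_large_slice_xpu",),
}

def run_with_skips(test_file, skipped_cases):
    """Run one test file, deselecting the listed cases via pytest's -k filter."""
    cmd = [sys.executable, "-m", "pytest", "-v", test_file]
    if skipped_cases:
        # Builds e.g. "not test_a and not test_b"; note -k matches substrings,
        # so a listed name also deselects any parametrized variants of it.
        cmd += ["-k", " and ".join(f"not {case}" for case in skipped_cases)]
    return subprocess.call(cmd)

exit_code = 0
for test_file, skipped_cases in skip_dict.items():
    exit_code |= run_with_skips(test_file, skipped_cases)
sys.exit(exit_code)
```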
|