Add cases to skip list for test_transformers.py and remove passed cases #1710

Status: Open. Wants to merge 3 commits into base: main.

163 changes: 2 additions & 161 deletions test/xpu/skip_list_common.py
@@ -4,20 +4,6 @@
# XPU implementation doesn't claim FP8 now
# https://github.com/intel/torch-xpu-ops/issues/461
"float8",
# workarounds for the following tests
# https://github.com/intel/torch-xpu-ops/issues/1214
"test_python_ref__refs_exp_xpu_complex128",
"test_python_ref__refs_sigmoid_xpu_complex128",
"test_python_ref_executor__refs_log2_executor_aten_xpu_complex128",
"test_python_ref_executor__refs_exp_executor_aten_xpu_complex128",
"test_python_ref_torch_fallback__refs_log2_xpu_complex128",
"test_python_ref_torch_fallback__refs_log10_xpu_complex128",
"test_python_ref_torch_fallback__refs_sigmoid_xpu_complex128",
"test_python_ref_executor__refs_log10_executor_aten_xpu_complex128",
"test_noncontiguous_samples_histogram_xpu_float32",
"test_python_ref_executor__refs_sigmoid_executor_aten_xpu_complex128",
# TODO: Fix the following tests
"test_out_warning_torch__scaled_mm_xpu",
# To be removed from this file.
# CUDA and XPU both XFAIL now.
"test_out_narrow_copy_xpu_float32",
@@ -60,10 +46,6 @@
"test_noncontiguous_samples_nn_functional_conv_transpose3d_xpu_int64",
"test_noncontiguous_samples_nn_functional_conv1d_xpu_int64",
"test_noncontiguous_samples_nn_functional_conv2d_xpu_int64",
# Linalg OPs not supported
# RuntimeError: mode only supports CPU AND CUDA device type, got: xpu
# Issue https://github.com/intel/torch-xpu-ops/issues/327
"test_numpy_ref_linalg_tensorinv_xpu_float64",
# RuntimeError: could not create a primitive descriptor for a deconvolution
# https://github.com/intel/torch-xpu-ops/issues/253
"test_variant_consistency_eager_nn_functional_conv_transpose2d_xpu_complex64",
@@ -106,9 +88,6 @@
# Jiterator is only supported on CUDA and ROCm GPUs, none are available.
# https://github.com/intel/torch-xpu-ops/issues/584
"_jiterator_",
# https://github.com/intel/torch-xpu-ops/issues/157
# Segfault:
"test_dtypes_nn_functional_multi_head_attention_forward_xpu", # https://github.com/intel/torch-xpu-ops/issues/157
# Linalg OPs not supported
"test_dtypes_pca_lowrank_xpu", # https://github.com/intel/torch-xpu-ops/issues/157
"test_dtypes_svd_lowrank_xpu", # https://github.com/intel/torch-xpu-ops/issues/157
@@ -161,25 +140,20 @@
"test_dtypes_lu_solve_xpu",
"test_dtypes_lu_xpu",
"test_dtypes_mv_xpu",
"test_dtypes_nn_functional_scaled_dot_product_attention_xpu",
"test_dtypes_norm_nuc_xpu",
"test_dtypes_pinverse_xpu",
"test_dtypes_qr_xpu",
"test_dtypes_svd_xpu",
"test_dtypes_tensordot_xpu",
"test_dtypes_triangular_solve_xpu",
"test_noncontiguous_samples___rmatmul___xpu_complex64",
"test_noncontiguous_samples___rmatmul___xpu_int64",
"test_noncontiguous_samples_addbmm_xpu_complex64",
"test_noncontiguous_samples_addbmm_xpu_float32",
"test_noncontiguous_samples_addbmm_xpu_int64",
"test_noncontiguous_samples_addmm_decomposed_xpu_complex64",
"test_noncontiguous_samples_addmm_decomposed_xpu_int64",
"test_noncontiguous_samples_addmm_xpu_complex64",
"test_noncontiguous_samples_addmm_xpu_float32",
"test_noncontiguous_samples_addmm_xpu_int64",
"test_noncontiguous_samples_addmv_xpu_complex64",
"test_noncontiguous_samples_addmv_xpu_float32",
"test_noncontiguous_samples_addmv_xpu_int64",
"test_noncontiguous_samples_addr_xpu_complex64",
"test_noncontiguous_samples_baddbmm_xpu_complex64",
@@ -194,8 +168,6 @@
"test_noncontiguous_samples_einsum_xpu_complex64",
"test_noncontiguous_samples_einsum_xpu_int64",
"test_noncontiguous_samples_geqrf_xpu_complex64",
"test_noncontiguous_samples_inner_xpu_complex64",
"test_noncontiguous_samples_inner_xpu_int64",
"test_noncontiguous_samples_linalg_cholesky_ex_xpu_complex64",
"test_noncontiguous_samples_linalg_cholesky_xpu_complex64",
"test_noncontiguous_samples_linalg_cond_xpu_complex64",
@@ -258,11 +230,7 @@
"test_numpy_ref_addbmm_xpu_float64",
"test_numpy_ref_addbmm_xpu_int64",
"test_numpy_ref_linalg_tensorinv_xpu_complex128",
"test_out_addbmm_xpu_float32",
"test_out_addmm_xpu_float32",
"test_out_addmv_xpu_float32",
"test_out_baddbmm_xpu_float32",
"test_out_mm_xpu_float32",
"test_out_mv_xpu_float32",
"test_out_requires_grad_error_addbmm_xpu_complex64",
"test_out_requires_grad_error_addmm_decomposed_xpu_complex64",
@@ -273,7 +241,6 @@
"test_out_requires_grad_error_cholesky_inverse_xpu_complex64",
"test_out_requires_grad_error_cholesky_solve_xpu_complex64",
"test_out_requires_grad_error_cholesky_xpu_complex64",
"test_out_requires_grad_error_inner_xpu_complex64",
"test_out_requires_grad_error_linalg_cholesky_ex_xpu_complex64",
"test_out_requires_grad_error_linalg_cholesky_xpu_complex64",
"test_out_requires_grad_error_linalg_eig_xpu_complex64",
@@ -300,38 +267,23 @@
"test_out_requires_grad_error_qr_xpu_complex64",
"test_out_requires_grad_error_tensordot_xpu_complex64",
"test_out_requires_grad_error_triangular_solve_xpu_complex64",
"test_out_warning_addmm_decomposed_xpu",
"test_out_warning_addmm_xpu",
"test_out_warning_addmv_xpu",
"test_out_warning_baddbmm_xpu",
"test_out_warning_bmm_xpu",
"test_out_warning_matmul_xpu",
"test_out_warning_mm_xpu",
"test_out_warning_mv_xpu",
"test_out_warning_nn_functional_linear_xpu",
"test_python_ref__refs_linalg_svd_xpu_complex128",
"test_python_ref__refs_linalg_svd_xpu_complex64",
"test_python_ref__refs_linalg_svd_xpu_float64",
"test_python_ref_executor__refs_linalg_svd_executor_aten_xpu_complex128",
"test_python_ref_executor__refs_linalg_svd_executor_aten_xpu_complex64",
"test_python_ref_executor__refs_linalg_svd_executor_aten_xpu_float64",
"test_python_ref_executor__refs_nn_functional_pdist_executor_aten_xpu_float64",
"test_python_ref_meta__refs_linalg_svd_xpu_complex128",
"test_python_ref_meta__refs_linalg_svd_xpu_complex64",
"test_python_ref_meta__refs_linalg_svd_xpu_float64",
"test_python_ref_meta__refs_nn_functional_pdist_xpu_float64",
"test_python_ref_torch_fallback__refs_linalg_svd_xpu_complex128",
"test_python_ref_torch_fallback__refs_linalg_svd_xpu_complex64",
"test_python_ref_torch_fallback__refs_linalg_svd_xpu_float64",
"test_python_ref_torch_fallback__refs_nn_functional_pdist_xpu_float64",
"test_variant_consistency_eager___rmatmul___xpu_complex64",
"test_variant_consistency_eager_addmm_decomposed_xpu_complex64",
"test_variant_consistency_eager_addmm_xpu_complex64",
"test_variant_consistency_eager_addmm_xpu_float32",
"test_variant_consistency_eager_addmv_xpu_complex64",
"test_variant_consistency_eager_addmv_xpu_float32",
"test_variant_consistency_eager_baddbmm_xpu_complex64",
"test_variant_consistency_eager_baddbmm_xpu_float32",
"test_variant_consistency_eager_bmm_xpu_complex64",
"test_variant_consistency_eager_cholesky_inverse_xpu_complex64",
"test_variant_consistency_eager_cholesky_solve_xpu_complex64",
@@ -340,7 +292,6 @@
"test_variant_consistency_eager_cov_xpu_complex64",
"test_variant_consistency_eager_einsum_xpu_complex64",
"test_variant_consistency_eager_geqrf_xpu_complex64",
"test_variant_consistency_eager_inner_xpu_complex64",
"test_variant_consistency_eager_linalg_cholesky_ex_xpu_complex64",
"test_variant_consistency_eager_linalg_cholesky_xpu_complex64",
"test_variant_consistency_eager_linalg_cond_xpu_complex64",
@@ -414,7 +365,6 @@
"test_conj_view_cov_xpu_complex64",
"test_conj_view_einsum_xpu_complex64",
"test_conj_view_geqrf_xpu_complex64",
"test_conj_view_inner_xpu_complex64",
"test_conj_view_linalg_cholesky_ex_xpu_complex64",
"test_conj_view_linalg_cholesky_xpu_complex64",
"test_conj_view_linalg_cond_xpu_complex64",
@@ -478,7 +428,6 @@
"test_neg_conj_view_corrcoef_xpu_complex128",
"test_neg_conj_view_cov_xpu_complex128",
"test_neg_conj_view_geqrf_xpu_complex128",
"test_neg_conj_view_inner_xpu_complex128",
"test_neg_conj_view_linalg_cholesky_ex_xpu_complex128",
"test_neg_conj_view_linalg_cholesky_xpu_complex128",
"test_neg_conj_view_linalg_cond_xpu_complex128",
@@ -520,73 +469,11 @@
"test_neg_conj_view_qr_xpu_complex128",
"test_neg_conj_view_tensordot_xpu_complex128",
"test_neg_conj_view_triangular_solve_xpu_complex128",
"test_neg_view___rmatmul___xpu_float64",
"test_neg_view__refs_linalg_svd_xpu_float64",
"test_neg_view__refs_nn_functional_pdist_xpu_float64",
"test_neg_view_addbmm_xpu_float64",
"test_neg_view_addmm_decomposed_xpu_float64",
"test_neg_view_addmm_xpu_float64",
"test_neg_view_addmv_xpu_float64",
"test_neg_view_addr_xpu_float64",
"test_neg_view_baddbmm_xpu_float64",
"test_neg_view_bmm_xpu_float64",
"test_neg_view_cdist_xpu_float64",
"test_neg_view_cholesky_inverse_xpu_float64",
"test_neg_view_cholesky_solve_xpu_float64",
"test_neg_view_cholesky_xpu_float64",
"test_neg_view_corrcoef_xpu_float64",
"test_neg_view_cov_xpu_float64",
"test_neg_view_einsum_xpu_float64",
"test_neg_view_geqrf_xpu_float64",
"test_neg_view_inner_xpu_float64",
"test_neg_view_linalg_cholesky_ex_xpu_float64",
"test_neg_view_linalg_cholesky_xpu_float64",
"test_neg_view_linalg_cond_xpu_float64",
"test_neg_view_linalg_eig_xpu_float64",
"test_neg_view_linalg_eigh_xpu_float64",
"test_neg_view_linalg_eigvalsh_xpu_float64",
"test_neg_view_linalg_householder_product_xpu_float64",
"test_neg_view_linalg_inv_ex_xpu_float64",
"test_neg_view_linalg_inv_xpu_float64",
"test_neg_view_linalg_ldl_factor_ex_xpu_float64",
"test_neg_view_linalg_ldl_factor_xpu_float64",
"test_neg_view_linalg_ldl_solve_xpu_float64",
"test_neg_view_linalg_lstsq_grad_oriented_xpu_float64",
"test_neg_view_linalg_lstsq_xpu_float64",
"test_neg_view_linalg_matrix_norm_xpu_float64",
"test_neg_view_linalg_matrix_power_xpu_float64",
"test_neg_view_linalg_matrix_rank_hermitian_xpu_float64",
"test_neg_view_linalg_matrix_rank_xpu_float64",
"test_neg_view_linalg_multi_dot_xpu_float64",
"test_neg_view_linalg_norm_subgradients_at_zero_xpu_float64",
"test_neg_view_linalg_norm_xpu_float64",
"test_neg_view_linalg_pinv_hermitian_xpu_float64",
"test_neg_view_linalg_pinv_singular_xpu_float64",
"test_neg_view_linalg_pinv_xpu_float64",
"test_neg_view_linalg_qr_xpu_float64",
"test_neg_view_linalg_solve_triangular_xpu_float64",
"test_neg_view_linalg_svd_xpu_float64",
"test_neg_view_linalg_svdvals_xpu_float64",
"test_neg_view_linalg_tensorinv_xpu_float64",
"test_neg_view_linalg_tensorsolve_xpu_float64",
"test_neg_view_logdet_xpu_float64",
"test_neg_view_lu_xpu_float64",
"test_neg_view_matmul_xpu_float64",
"test_neg_view_mm_xpu_float64",
"test_neg_view_mv_xpu_float64",
"test_neg_view_nn_functional_bilinear_xpu_float64",
"test_neg_view_nn_functional_linear_xpu_float64",
"test_neg_view_nn_functional_multi_head_attention_forward_xpu_float64",
"test_neg_view_nn_functional_scaled_dot_product_attention_xpu_float64",
"test_neg_view_norm_nuc_xpu_float64",
"test_neg_view_ormqr_xpu_float64",
"test_neg_view_pca_lowrank_xpu_float64",
"test_neg_view_pinverse_xpu_float64",
"test_neg_view_qr_xpu_float64",
"test_neg_view_svd_lowrank_xpu_float64",
"test_neg_view_svd_xpu_float64",
"test_neg_view_tensordot_xpu_float64",
"test_neg_view_triangular_solve_xpu_float64",
"test_noncontiguous_samples_pca_lowrank_xpu_complex64",
"test_noncontiguous_samples_svd_lowrank_xpu_complex64",
"test_variant_consistency_eager_pca_lowrank_xpu_complex64",
@@ -607,35 +494,10 @@
"test_dtypes_histogram_xpu",
# Unexpected success, CUDA got XFAIL because CUDA does not have histogramdd supported
"test_errors_histogramdd_xpu",
# 2025 bundle std::pow complex result is different on host and device
"test_python_ref__refs_square_xpu_complex64",
"test_python_ref_torch_fallback__refs_square_xpu_complex64",
"test_python_ref_torch_fallback__refs_exp_xpu_complex128",
# Failed on rolling driver, passed on preci
"test_python_ref__refs_div_trunc_rounding_xpu_float64",
"test_python_ref_executor__refs_div_trunc_rounding_executor_aten_xpu_float64",
"test_python_ref_torch_fallback__refs_div_trunc_rounding_xpu_float64",
# TODO: passed with a source-built version, investigate
"test_python_ref__refs_log2_xpu_complex128",
# The following dtypes did not work in backward but are listed by the OpInfo: {torch.bfloat16}.
"test_dtypes_fft_fft2_xpu",
"test_dtypes_fft_fft_xpu",
"test_dtypes_fft_fftn_xpu",
"test_dtypes_fft_hfft2_xpu",
"test_dtypes_fft_hfft_xpu",
"test_dtypes_fft_hfftn_xpu",
"test_dtypes_fft_ifft2_xpu",
"test_dtypes_fft_ifft_xpu",
"test_dtypes_fft_ifftn_xpu",
"test_dtypes_fft_ihfft2_xpu",
"test_dtypes_fft_ihfft_xpu",
"test_dtypes_fft_ihfftn_xpu",
"test_dtypes_fft_irfft2_xpu",
"test_dtypes_fft_irfft_xpu",
"test_dtypes_fft_irfftn_xpu",
"test_dtypes_fft_rfft2_xpu",
"test_dtypes_fft_rfft_xpu",
"test_dtypes_fft_rfftn_xpu",
),
"test_binary_ufuncs_xpu.py": (
"test_fmod_remainder_by_zero_integral_xpu_int64", # zero division is an undefined behavior: different handles on different backends
@@ -718,25 +580,11 @@
# oneDNN issues
# Double and complex datatype matmul is not supported in oneDNN
# https://github.com/intel/torch-xpu-ops/issues/253
"test_sdp_math_gradcheck_contiguous_inputs_False_xpu",
"test_sdp_math_gradcheck_contiguous_inputs_True_xpu",
"test_transformerencoder_batch_first_True_training_True_enable_nested_tensor_True_xpu",
"test_transformerencoder_batch_first_True_training_True_enable_nested_tensor_False_xpu",
"test_transformerencoder_batch_first_True_training_False_enable_nested_tensor_True_xpu",
"test_transformerencoder_batch_first_True_training_False_enable_nested_tensor_False_xpu",
"test_transformerencoder_batch_first_False_training_True_enable_nested_tensor_True_xpu",
"test_transformerencoder_batch_first_False_training_True_enable_nested_tensor_False_xpu",
"test_transformerencoder_batch_first_False_training_False_enable_nested_tensor_True_xpu",
"test_transformerencoder_batch_first_False_training_False_enable_nested_tensor_False_xpu",
"test_scaled_dot_product_attention_4D_input_dim_no_attn_mask_dropout_p_0_5_xpu",
"test_scaled_dot_product_attention_4D_input_dim_no_attn_mask_dropout_p_0_2_xpu",
"test_scaled_dot_product_attention_4D_input_dim_no_attn_mask_dropout_p_0_0_xpu",
"test_scaled_dot_product_attention_4D_input_dim_4D_causal_attn_mask_dropout_p_0_5_xpu",
"test_scaled_dot_product_attention_4D_input_dim_4D_causal_attn_mask_dropout_p_0_2_xpu",
"test_scaled_dot_product_attention_4D_input_dim_4D_causal_attn_mask_dropout_p_0_0_xpu",
"test_scaled_dot_product_attention_4D_input_dim_4D_attn_mask_dropout_p_0_5_xpu",
"test_scaled_dot_product_attention_4D_input_dim_4D_attn_mask_dropout_p_0_2_xpu",
"test_scaled_dot_product_attention_4D_input_dim_4D_attn_mask_dropout_p_0_0_xpu",
"test_scaled_dot_product_attention_4D_input_dim_2D_causal_attn_mask_dropout_p_0_5_xpu",
"test_scaled_dot_product_attention_4D_input_dim_2D_causal_attn_mask_dropout_p_0_2_xpu",
"test_scaled_dot_product_attention_4D_input_dim_2D_causal_attn_mask_dropout_p_0_0_xpu",
@@ -745,33 +593,26 @@
"test_scaled_dot_product_attention_4D_input_dim_2D_attn_mask_dropout_p_0_0_xpu",
"test_scaled_dot_product_attention_3D_input_dim_no_attn_mask_dropout_p_0_5_xpu",
"test_scaled_dot_product_attention_3D_input_dim_no_attn_mask_dropout_p_0_2_xpu",
"test_scaled_dot_product_attention_3D_input_dim_no_attn_mask_dropout_p_0_0_xpu",
"test_scaled_dot_product_attention_3D_input_dim_3D_causal_attn_mask_dropout_p_0_5_xpu",
"test_scaled_dot_product_attention_3D_input_dim_3D_causal_attn_mask_dropout_p_0_2_xpu",
"test_scaled_dot_product_attention_3D_input_dim_3D_causal_attn_mask_dropout_p_0_0_xpu",
"test_scaled_dot_product_attention_3D_input_dim_3D_attn_mask_dropout_p_0_5_xpu",
"test_scaled_dot_product_attention_3D_input_dim_3D_attn_mask_dropout_p_0_2_xpu",
"test_scaled_dot_product_attention_3D_input_dim_3D_attn_mask_dropout_p_0_0_xpu",
"test_scaled_dot_product_attention_3D_input_dim_2D_causal_attn_mask_dropout_p_0_5_xpu",
"test_scaled_dot_product_attention_3D_input_dim_2D_causal_attn_mask_dropout_p_0_2_xpu",
"test_scaled_dot_product_attention_3D_input_dim_2D_causal_attn_mask_dropout_p_0_0_xpu",
"test_scaled_dot_product_attention_3D_input_dim_2D_attn_mask_dropout_p_0_5_xpu",
"test_scaled_dot_product_attention_3D_input_dim_2D_attn_mask_dropout_p_0_2_xpu",
"test_scaled_dot_product_attention_3D_input_dim_2D_attn_mask_dropout_p_0_0_xpu",
# https://github.com/intel/torch-xpu-ops/issues/1432
"test_multiheadattention_fastpath_attn_mask_attn_mask_dim_2_key_padding_mask_dim_2_bool_xpu",
"test_multiheadattention_fastpath_attn_mask_attn_mask_dim_3_key_padding_mask_dim_2_bool_xpu",
"test_transformerencoder_fastpath_use_torchscript_False_enable_nested_tensor_False_use_autocast_False_d_model_12_xpu",
"test_transformerencoder_fastpath_use_torchscript_False_enable_nested_tensor_False_use_autocast_True_d_model_12_xpu",
"test_transformerencoder_fastpath_use_torchscript_False_enable_nested_tensor_True_use_autocast_False_d_model_12_xpu",
"test_transformerencoder_fastpath_use_torchscript_False_enable_nested_tensor_True_use_autocast_True_d_model_12_xpu",
# XPU doesn't support torch._scaled_dot_product_efficient_attention or F.scaled_dot_product_attention
"test_mem_eff_attention_fail_with_batch_size_geq_65536",
),
"test_complex_xpu.py": None,
"test_modules_xpu.py": (
# oneDNN issues
# RuntimeError: Double and complex datatype matmul is not supported in oneDNN
"test_cpu_gpu_parity_nn_Bilinear_xpu_float64",
"test_cpu_gpu_parity_nn_GRUCell_xpu_float64",
"test_cpu_gpu_parity_nn_GRU_eval_mode_xpu_float64",
"test_cpu_gpu_parity_nn_GRU_train_mode_xpu_float64",
"test_cpu_gpu_parity_nn_LSTMCell_xpu_float64",
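
For context, here is a minimal sketch of how a skip list like test/xpu/skip_list_common.py is laid out and might be consumed. The dict shape (test file name mapped to a tuple of test-name patterns, or None) mirrors the diff above; the is_skipped() helper and its substring-matching rule are assumptions for illustration only, not the actual torch-xpu-ops harness.

# Minimal sketch, assuming skip entries are matched as substrings of
# generated test names. skip_dict mirrors the layout shown in the diff;
# is_skipped() is a hypothetical helper, not the torch-xpu-ops harness.

skip_dict = {
    "test_ops_xpu.py": (
        # XPU implementation doesn't claim FP8 now
        # https://github.com/intel/torch-xpu-ops/issues/461
        "float8",
        "test_dtypes_histogram_xpu",
    ),
    # None means no cases are skipped for this file.
    "test_complex_xpu.py": None,
}

def is_skipped(test_file: str, test_name: str) -> bool:
    """Return True if any skip entry for test_file occurs in test_name.

    Substring matching lets a short entry such as "float8" blanket-skip
    an entire family of generated test cases at once.
    """
    entries = skip_dict.get(test_file) or ()
    return any(entry in test_name for entry in entries)

# Example: a generated FP8 case is skipped, a histogramdd case is not.
assert is_skipped("test_ops_xpu.py", "test_dtypes_float8_e4m3fn_xpu")
assert not is_skipped("test_ops_xpu.py", "test_errors_histogramdd_xpu")

Under this reading, a PR like this one simply deletes entries whose tests now pass and adds entries for newly failing cases, which is why the diff is almost entirely one-line removals from these tuples.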