From a80aea6c3e27e83c5b0bd6c35f253514dd1c4eb1 Mon Sep 17 00:00:00 2001
From: chenhu-wang
Date: Wed, 13 Nov 2024 03:19:50 -0500
Subject: [PATCH] Add FP16 MHA tests gated on AVX512 AMX FP16 support

---
 .../custom/subgraph_tests/src/x64/mha.cpp     |  3 +++
 .../shared_tests_instances/snippets/mha.cpp   | 13 +++++++++++++
 .../shared_tests_instances/snippets/utils.hpp | 11 +++++++++++
 3 files changed, 27 insertions(+)

diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/mha.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/mha.cpp
index 955619fc82ae67..a94f52be91df02 100644
--- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/mha.cpp
+++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/mha.cpp
@@ -256,6 +256,9 @@ TEST_P(MHATest, CompareWithRefs) {
     if (inputPrecisions[0] == ElementType::bf16 && !ov::with_cpu_x86_bfloat16())
         GTEST_SKIP();
 
+    if (inputPrecisions[0] == ElementType::f16 && !ov::with_cpu_x86_avx512_core_amx_fp16())
+        GTEST_SKIP();
+
     if (!ov::with_cpu_x86_avx512_core())
         GTEST_SKIP();
 
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/mha.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/mha.cpp
index 63f5176684ccc1..93e4e3df4e856b 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/mha.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/mha.cpp
@@ -124,6 +124,19 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_MHABF16_4D,
                                             ::testing::Values(CPUTestUtils::empty_plugin_config)),
                          MHA::getTestCaseName);
 
+INSTANTIATE_TEST_SUITE_P(smoke_Snippets_MHAFP16_4D,
+                         MHA,
+                         ::testing::Combine(::testing::ValuesIn(transposedShape_4D()),
+                                            ::testing::ValuesIn(precision_fp16_if_supported(4)),
+                                            ::testing::Values(ov::element::f32),
+                                            ::testing::ValuesIn({false, true}),
+                                            ::testing::Values(MHA::default_thread_count),
+                                            ::testing::Values(1),  // MHA (no Converts are expected for f16)
+                                            ::testing::Values(1),  // MHA
+                                            ::testing::Values(ov::test::utils::DEVICE_CPU),
+                                            ::testing::Values(CPUTestUtils::empty_plugin_config)),
+                         MHA::getTestCaseName);
+
 INSTANTIATE_TEST_SUITE_P(smoke_Snippets_MHAEnforceBF16,
                          MHA,
                          ::testing::Combine(::testing::ValuesIn(transposedShape_4D()),
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/utils.hpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/utils.hpp
index 6c0d54da973086..6815cdab671cea 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/utils.hpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/utils.hpp
@@ -16,6 +16,10 @@ static inline bool is_bf16_supported_by_brgemm() {
     return ov::with_cpu_x86_bfloat16() || ov::with_cpu_x86_avx512_core_amx_bf16();
 }
 
+static inline bool is_fp16_supported_by_brgemm() {
+    return ov::with_cpu_x86_avx512_core_amx_fp16();
+}
+
 static inline bool is_i8_supported_by_brgemm() {
     return ov::with_cpu_x86_avx512_core_vnni() || ov::with_cpu_x86_avx512_core_amx_int8();
 }
@@ -33,6 +37,13 @@ static inline std::vector<std::vector<element::Type>> precision_bf16_if_supporte
     return prc;
 }
 
+static inline std::vector<std::vector<element::Type>> precision_fp16_if_supported(size_t count) {
+    std::vector<std::vector<element::Type>> prc;
+    if (is_fp16_supported_by_brgemm())
+        prc.emplace_back(std::vector<element::Type>(count, element::f16));
+    return prc;
+}
+
 static inline std::vector<std::vector<element::Type>> quantized_precisions_if_supported() {
     std::vector<std::vector<element::Type>> prc = {};
     // In Snippets MatMul INT8 is supported only on VNNI/AMX platforms
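
Note on the gating pattern: precision_fp16_if_supported(4) returns an empty precision list on hosts without AVX512 AMX FP16, so the smoke_Snippets_MHAFP16_4D combination above contributes no parameter sets there, while the MHATest change skips explicitly via GTEST_SKIP(). The standalone sketch below is an illustration of that pattern only; has_amx_fp16() and element_type are placeholders standing in for ov::with_cpu_x86_avx512_core_amx_fp16() and ov::element::Type, not OpenVINO code.

// Minimal standalone sketch (illustration only) of the capability-gated helper
// added to utils.hpp. has_amx_fp16() is a placeholder for the CPU feature query;
// element_type stands in for ov::element::Type.
#include <cstddef>
#include <iostream>
#include <vector>

enum class element_type { f32, bf16, f16 };

// Placeholder capability check; the real helper queries the CPU feature flags.
static bool has_amx_fp16() { return false; }

// Mirrors precision_fp16_if_supported(): one {f16, ..., f16} entry when f16 brgemm
// is available, otherwise an empty list so no FP16 test cases are generated.
static std::vector<std::vector<element_type>> precision_fp16_if_supported(std::size_t count) {
    std::vector<std::vector<element_type>> prc;
    if (has_amx_fp16())
        prc.emplace_back(std::vector<element_type>(count, element_type::f16));
    return prc;
}

int main() {
    const auto prc = precision_fp16_if_supported(4);
    // Prints 0 on machines without AMX FP16: the FP16 suite simply has no cases there.
    std::cout << "FP16 precision sets generated: " << prc.size() << "\n";
    return 0;
}

On an AMX FP16 capable host, the new cases should appear under the smoke_Snippets_MHAFP16_4D prefix of the CPU functional tests.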