diff --git a/src/plugins/intel_cpu/tests/functional/behavior/export_import.cpp b/src/plugins/intel_cpu/tests/functional/behavior/export_import.cpp
index e9649ddf15292b..f2bf89992a03ea 100644
--- a/src/plugins/intel_cpu/tests/functional/behavior/export_import.cpp
+++ b/src/plugins/intel_cpu/tests/functional/behavior/export_import.cpp
@@ -2,16 +2,12 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include "openvino/core/any.hpp"
 #include "openvino/runtime/core.hpp"
 #include "openvino/runtime/compiled_model.hpp"
-#include "openvino/runtime/properties.hpp"
 #include "common_test_utils/test_common.hpp"
 #include "ov_models/builders.hpp"
-
 #include 
-#include 
 
 namespace {
@@ -28,10 +24,10 @@ std::shared_ptr MakeMatMulModel() {
 
     auto matmul = std::make_shared(params[0], matmul_const);
     auto add_const = ngraph::builder::makeConstant(precision, {1, 1024}, std::vector{}, true);
-    auto add = ngraph::builder::makeEltwise(matmul, add_const, ngraph::helpers::EltwiseTypes::ADD);
+    auto add = ngraph::builder::makeEltwise(matmul, add_const, ov::test::utils::EltwiseTypes::ADD);
     auto softmax = std::make_shared(add);
 
-    ngraph::NodeVector results{softmax};
+    ov::NodeVector results{softmax};
     return std::make_shared(results, params, "MatMulModel");
 }
diff --git a/src/plugins/intel_cpu/tests/functional/behavior/ov_executable_network/properties.cpp b/src/plugins/intel_cpu/tests/functional/behavior/ov_executable_network/properties.cpp
index 8f4712bba59b98..6099648bca53e6 100644
--- a/src/plugins/intel_cpu/tests/functional/behavior/ov_executable_network/properties.cpp
+++ b/src/plugins/intel_cpu/tests/functional/behavior/ov_executable_network/properties.cpp
@@ -5,14 +5,11 @@
 #include 
 #include "test_utils/properties_test.hpp"
-#include 
-#include "ie_system_conf.h"
-#include "ov_models/subgraph_builders.hpp"
+#include "openvino/runtime/system_conf.hpp"
 #include "openvino/runtime/core.hpp"
 #include "openvino/runtime/compiled_model.hpp"
 #include "openvino/runtime/properties.hpp"
 #include "openvino/runtime/intel_cpu/properties.hpp"
-#include "functional_test_utils/skip_tests_config.hpp"
 
 namespace {
@@ -165,7 +162,7 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckSparseWeigthsDecompression
     ASSERT_NO_THROW(ov::CompiledModel compiledModel = core.compile_model(model, deviceName));
 }
 
-const auto bf16_if_can_be_emulated = InferenceEngine::with_cpu_x86_avx512_core() ? ov::element::bf16 : ov::element::f32;
+const auto bf16_if_can_be_emulated = ov::with_cpu_x86_avx512_core() ? ov::element::bf16 : ov::element::f32;
 
 TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckExecutionModeIsAvailableInCoreAndModel) {
     ov::Core ie;
diff --git a/src/plugins/intel_cpu/tests/functional/behavior/ov_plugin/properties.cpp b/src/plugins/intel_cpu/tests/functional/behavior/ov_plugin/properties.cpp
index 10c0a244fcca31..37d55063a21a3e 100644
--- a/src/plugins/intel_cpu/tests/functional/behavior/ov_plugin/properties.cpp
+++ b/src/plugins/intel_cpu/tests/functional/behavior/ov_plugin/properties.cpp
@@ -9,8 +9,7 @@
 #include "openvino/runtime/core.hpp"
 #include "openvino/core/type/element_type.hpp"
 #include "openvino/runtime/intel_cpu/properties.hpp"
-#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
-#include "ie_system_conf.h"
+#include "openvino/runtime/system_conf.hpp"
 
 #include 
@@ -129,8 +128,8 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigAffinity) {
     ov::Affinity value = ov::Affinity::NONE;
 
 #if (defined(__APPLE__) || defined(_WIN32))
-    auto numaNodes = InferenceEngine::getAvailableNUMANodes();
-    auto coreTypes = InferenceEngine::getAvailableCoresTypes();
+    auto numaNodes = ov::get_available_numa_nodes();
+    auto coreTypes = ov::get_available_cores_types();
     auto defaultBindThreadParameter = ov::Affinity::NONE;
     if (coreTypes.size() > 1) {
         defaultBindThreadParameter = ov::Affinity::HYBRID_AWARE;
@@ -139,7 +138,7 @@
     }
 #else
     auto defaultBindThreadParameter = ov::Affinity::CORE;
-    auto coreTypes = InferenceEngine::getAvailableCoresTypes();
+    auto coreTypes = ov::get_available_cores_types();
     if (coreTypes.size() > 1) {
         defaultBindThreadParameter = ov::Affinity::HYBRID_AWARE;
     }
@@ -176,7 +175,7 @@
 #if defined(OV_CPU_ARM_ENABLE_FP16)
 const auto expected_precision_for_performance_mode = ov::element::f16;
 #else
-const auto expected_precision_for_performance_mode = InferenceEngine::with_cpu_x86_bfloat16() ? ov::element::bf16 : ov::element::f32;
+const auto expected_precision_for_performance_mode = ov::with_cpu_x86_bfloat16() ? ov::element::bf16 : ov::element::f32;
 #endif
 
 TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigHintInferencePrecision) {
@@ -215,7 +214,7 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigEnableProfiling) {
     ASSERT_EQ(enableProfiling, value);
 }
 
-const auto bf16_if_can_be_emulated = InferenceEngine::with_cpu_x86_avx512_core() ? ov::element::bf16 : ov::element::f32;
+const auto bf16_if_can_be_emulated = ov::with_cpu_x86_avx512_core() ? ov::element::bf16 : ov::element::f32;
 
 using ExpectedModeAndType = std::pair;
 const std::map expectedTypeByMode {