CPU func/beh tests to API 2.0 (openvinotoolkit#21236)
vurusovs authored Nov 22, 2023
1 parent 9ee5d86 · commit bd7b6b3
Showing 3 changed files with 10 additions and 18 deletions.
File 1 of 3:
@@ -2,16 +2,12 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "openvino/core/any.hpp"
#include "openvino/runtime/core.hpp"
#include "openvino/runtime/compiled_model.hpp"
#include "openvino/runtime/properties.hpp"
#include "common_test_utils/test_common.hpp"
#include "ov_models/builders.hpp"


#include <openvino/opsets/opset9.hpp>
#include <ie/ie_core.hpp>

namespace {

@@ -28,10 +24,10 @@ std::shared_ptr<ov::Model> MakeMatMulModel() {
auto matmul = std::make_shared<ov::op::v0::MatMul>(params[0], matmul_const);

auto add_const = ngraph::builder::makeConstant(precision, {1, 1024}, std::vector<float>{}, true);
-auto add = ngraph::builder::makeEltwise(matmul, add_const, ngraph::helpers::EltwiseTypes::ADD);
+auto add = ngraph::builder::makeEltwise(matmul, add_const, ov::test::utils::EltwiseTypes::ADD);
auto softmax = std::make_shared<ov::opset9::Softmax>(add);

-ngraph::NodeVector results{softmax};
+ov::NodeVector results{softmax};
return std::make_shared<ov::Model>(results, params, "MatMulModel");
}

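For orientation, the builder above still leans on the ngraph::builder helpers for its constants and the eltwise op; the same model can be written entirely against the API 2.0 op classes. A minimal sketch, assuming a {1, 1024} input and a {1024, 1024} weight (the parameter setup is collapsed in this view, so those shapes and the fixed fill values are assumptions, not the test's actual randomized constants):

#include <memory>
#include <vector>

#include "openvino/core/model.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/matmul.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/softmax.hpp"

std::shared_ptr<ov::Model> make_matmul_model() {
    const auto precision = ov::element::f32;
    auto param = std::make_shared<ov::op::v0::Parameter>(precision, ov::Shape{1, 1024});
    // Fixed fills stand in for the randomized data makeConstant() generates.
    auto matmul_const = ov::op::v0::Constant::create(precision, {1024, 1024},
                                                     std::vector<float>(1024 * 1024, 0.5f));
    auto matmul = std::make_shared<ov::op::v0::MatMul>(param, matmul_const);
    auto add_const = ov::op::v0::Constant::create(precision, {1, 1024},
                                                  std::vector<float>(1024, 1.0f));
    auto add = std::make_shared<ov::op::v1::Add>(matmul, add_const);
    auto softmax = std::make_shared<ov::op::v8::Softmax>(add, 1);  // opset9::Softmax is v8
    return std::make_shared<ov::Model>(ov::NodeVector{softmax}, ov::ParameterVector{param},
                                       "MatMulModel");
}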
File 2 of 3:
@@ -5,14 +5,11 @@
#include <gtest/gtest.h>

#include "test_utils/properties_test.hpp"
#include <common_test_utils/test_assertions.hpp>
#include "ie_system_conf.h"
#include "ov_models/subgraph_builders.hpp"
#include "openvino/runtime/system_conf.hpp"
#include "openvino/runtime/core.hpp"
#include "openvino/runtime/compiled_model.hpp"
#include "openvino/runtime/properties.hpp"
#include "openvino/runtime/intel_cpu/properties.hpp"
#include "functional_test_utils/skip_tests_config.hpp"

namespace {

@@ -165,7 +162,7 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckSparseWeigthsDecompression
ASSERT_NO_THROW(ov::CompiledModel compiledModel = core.compile_model(model, deviceName));
}

-const auto bf16_if_can_be_emulated = InferenceEngine::with_cpu_x86_avx512_core() ? ov::element::bf16 : ov::element::f32;
+const auto bf16_if_can_be_emulated = ov::with_cpu_x86_avx512_core() ? ov::element::bf16 : ov::element::f32;

TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckExecutionModeIsAvailableInCoreAndModel) {
ov::Core ie;
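The test above is cut off by the collapsed view, but its name states the contract: ov::hint::execution_mode must be visible both on the plugin and on a compiled model. A minimal sketch of that kind of round-trip check under API 2.0 (the model argument and device name are placeholders, not the suite's actual fixtures):

#include <gtest/gtest.h>

#include "openvino/runtime/core.hpp"
#include "openvino/runtime/properties.hpp"

void check_execution_mode_visible(const std::shared_ptr<ov::Model>& model) {
    ov::Core core;
    // Queryable on the plugin itself...
    ASSERT_NO_THROW(core.get_property("CPU", ov::hint::execution_mode));
    // ...and on a model compiled for it.
    ov::CompiledModel compiled = core.compile_model(model, "CPU");
    ASSERT_NO_THROW(compiled.get_property(ov::hint::execution_mode));
}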
File 3 of 3:
@@ -9,8 +9,7 @@
#include "openvino/runtime/core.hpp"
#include "openvino/core/type/element_type.hpp"
#include "openvino/runtime/intel_cpu/properties.hpp"
#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
#include "ie_system_conf.h"
#include "openvino/runtime/system_conf.hpp"

#include <algorithm>

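Viewed together, the three files follow one substitution table; the pairs below are collected from the hunks in this commit (no new API is introduced here, only the legacy-to-2.0 spellings the diff itself shows):

// Legacy (API 1.0)                               API 2.0 replacement
// "ie_system_conf.h"                             "openvino/runtime/system_conf.hpp"
// InferenceEngine::with_cpu_x86_avx512_core()    ov::with_cpu_x86_avx512_core()
// InferenceEngine::with_cpu_x86_bfloat16()       ov::with_cpu_x86_bfloat16()
// InferenceEngine::getAvailableNUMANodes()       ov::get_available_numa_nodes()
// InferenceEngine::getAvailableCoresTypes()      ov::get_available_cores_types()
// ngraph::helpers::EltwiseTypes::ADD             ov::test::utils::EltwiseTypes::ADD
// ngraph::NodeVector                             ov::NodeVector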
@@ -129,8 +128,8 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigAffinity) {
ov::Affinity value = ov::Affinity::NONE;

#if (defined(__APPLE__) || defined(_WIN32))
-auto numaNodes = InferenceEngine::getAvailableNUMANodes();
-auto coreTypes = InferenceEngine::getAvailableCoresTypes();
+auto numaNodes = ov::get_available_numa_nodes();
+auto coreTypes = ov::get_available_cores_types();
auto defaultBindThreadParameter = ov::Affinity::NONE;
if (coreTypes.size() > 1) {
defaultBindThreadParameter = ov::Affinity::HYBRID_AWARE;
@@ -139,7 +138,7 @@
}
#else
auto defaultBindThreadParameter = ov::Affinity::CORE;
-auto coreTypes = InferenceEngine::getAvailableCoresTypes();
+auto coreTypes = ov::get_available_cores_types();
if (coreTypes.size() > 1) {
defaultBindThreadParameter = ov::Affinity::HYBRID_AWARE;
}
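The branch collapsed between the two hunks presumably downgrades to ov::Affinity::NUMA when several NUMA nodes are present; treating that as an assumption, the default-affinity derivation the test mirrors can be sketched as:

#include "openvino/runtime/properties.hpp"
#include "openvino/runtime/system_conf.hpp"

ov::Affinity default_affinity_guess() {
#if defined(__APPLE__) || defined(_WIN32)
    ov::Affinity affinity = ov::Affinity::NONE;
    if (ov::get_available_cores_types().size() > 1)
        affinity = ov::Affinity::HYBRID_AWARE;        // hybrid big/little cores present
    else if (ov::get_available_numa_nodes().size() > 1)
        affinity = ov::Affinity::NUMA;                // assumed from the collapsed branch
#else
    ov::Affinity affinity = ov::Affinity::CORE;
    if (ov::get_available_cores_types().size() > 1)
        affinity = ov::Affinity::HYBRID_AWARE;
#endif
    return affinity;
}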
@@ -176,7 +175,7 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigAffinityCore) {
#if defined(OV_CPU_ARM_ENABLE_FP16)
const auto expected_precision_for_performance_mode = ov::element::f16;
#else
-const auto expected_precision_for_performance_mode = InferenceEngine::with_cpu_x86_bfloat16() ? ov::element::bf16 : ov::element::f32;
+const auto expected_precision_for_performance_mode = ov::with_cpu_x86_bfloat16() ? ov::element::bf16 : ov::element::f32;
#endif

TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigHintInferencePrecision) {
@@ -215,7 +214,7 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigEnableProfiling) {
ASSERT_EQ(enableProfiling, value);
}

-const auto bf16_if_can_be_emulated = InferenceEngine::with_cpu_x86_avx512_core() ? ov::element::bf16 : ov::element::f32;
+const auto bf16_if_can_be_emulated = ov::with_cpu_x86_avx512_core() ? ov::element::bf16 : ov::element::f32;
using ExpectedModeAndType = std::pair<ov::hint::ExecutionMode, ov::element::Type>;

const std::map<ov::hint::ExecutionMode, ExpectedModeAndType> expectedTypeByMode {
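The map's initializers are collapsed here, but the pair type already tells the story: each requested ov::hint::ExecutionMode maps to the mode and inference precision the compiled model is expected to report. A sketch of the loop such a table typically drives (the example entries are assumptions built from the constants above, not copied from the elided diff):

#include <map>
#include <utility>

#include "openvino/runtime/core.hpp"
#include "openvino/runtime/properties.hpp"
#include "openvino/runtime/system_conf.hpp"

using ExpectedModeAndType = std::pair<ov::hint::ExecutionMode, ov::element::Type>;

void check_execution_modes(const std::shared_ptr<ov::Model>& model) {
    const auto bf16_if_can_be_emulated =
        ov::with_cpu_x86_avx512_core() ? ov::element::bf16 : ov::element::f32;
    // Assumed entries: ACCURACY pins f32, PERFORMANCE allows bf16 where emulable.
    const std::map<ov::hint::ExecutionMode, ExpectedModeAndType> expected_by_mode{
        {ov::hint::ExecutionMode::PERFORMANCE,
         {ov::hint::ExecutionMode::PERFORMANCE, bf16_if_can_be_emulated}},
        {ov::hint::ExecutionMode::ACCURACY,
         {ov::hint::ExecutionMode::ACCURACY, ov::element::f32}},
    };
    ov::Core core;
    for (const auto& entry : expected_by_mode) {
        auto compiled = core.compile_model(model, "CPU", ov::hint::execution_mode(entry.first));
        // Both the effective mode and the precision it implies should round-trip.
        const auto mode = compiled.get_property(ov::hint::execution_mode);
        const auto type = compiled.get_property(ov::hint::inference_precision);
        // Expect mode == entry.second.first and type == entry.second.second.
        (void)mode;
        (void)type;
    }
}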
